In [1]:
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
In [2]:
cd drive/MyDrive/project_files/
/content/drive/MyDrive/project_files
In [3]:
ls
csv2object_and_unetDemo.ipynb  project_files/  saved_models/  visualize.py
data/                          __pycache__/    utils.py
In [4]:
import csv
import json
import os
from PIL import Image

import pprint

import skimage.color
import skimage.draw

import visualize
import utils as my_utils

import matplotlib.pyplot as plt
import cv2

import numpy as np

import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from IPython.display import clear_output

Data structures

In [5]:
# global context variables
anno_path = './data/csv' # a folder of annotation files
img_path = './data/images' # a folder of image files
In [6]:
class annotation:
    # label: the class label
    # xcoords: list of x-coordinates of a polygon
    # ycoords: list of y-coordinates of a polygon
    def __str__(self):
        return ('* label: ' + self.label + '\n  xcoords: ' + str(self.xcoords)
                + '\n  ycoords: ' + str(self.ycoords) + '\n')
    __repr__ = __str__
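A more idiomatic way to declare a plain data holder like this, if desired, is a dataclass. This is a sketch only (the hypothetical name annotation_dc avoids shadowing the class above; the rest of the notebook keeps the class as defined):

from dataclasses import dataclass, field

@dataclass
class annotation_dc:  # hypothetical name; not used elsewhere in this notebook
    label: str = ''
    xcoords: list = field(default_factory=list)
    ycoords: list = field(default_factory=list)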
In [7]:
class img_meta:
    # img_name: image file name
    # annotations: the annotation list

    def __str__(self):
        return 'img_name: ' + self.img_name + ', annotations: \n' + str(self.annotations)
    __repr__ = __str__
    
    def parse(self, anno_file_name):
        anno_file_path = os.path.join(anno_path, anno_file_name)        
        with open(anno_file_path, 'r') as file:
            csvReader = csv.DictReader(file)
            self.annotations = []
            for idx, rows in enumerate(csvReader):
                anno_obj = annotation()
                if idx == 0: self.img_name = rows['filename'] # get image file name from the first row
                region_attributes = rows['region_attributes']
                region_attributes = json.loads(region_attributes) # convert to dict
                anno_obj.label = region_attributes['class'] # get annotation class
                region_shape_attributes = json.loads(rows['region_shape_attributes'])
                anno_obj.xcoords = region_shape_attributes['all_points_x']
                anno_obj.ycoords = region_shape_attributes['all_points_y']
                self.annotations.append(anno_obj)
    
    # This function uses OpenCV rather than PIL to load the image,
    # because PIL has issues with the image channels.
    def load_image(self):
        """Load the specified image and return a [H, W, 3] NumPy array."""
        img_file_path = os.path.join(img_path, self.img_name)
        # Load image
        image = cv2.imread(img_file_path)
        # If grayscale, convert to RGB for consistency.
        if image.ndim != 3:
            image = skimage.color.gray2rgb(image)
        # If it has an alpha channel, remove it for consistency.
        if image.shape[-1] == 4:
            image = image[..., :3]

        return image
    # Horizontal-flip augmentation: parse the annotations, then mirror
    # the polygon x-coordinates across the image width.
    def horizontal_image_augmentation(self, anno_file_name, image_width_size):
        self.parse(anno_file_name)
        for anno_obj in self.annotations:
            anno_obj.xcoords = [image_width_size - x for x in anno_obj.xcoords]

    # Vertical-flip augmentation: parse the annotations, then mirror
    # the polygon y-coordinates across the image height.
    def vertical_image_augmentation(self, anno_file_name, image_height_size):
        self.parse(anno_file_name)
        for anno_obj in self.annotations:
            anno_obj.ycoords = [image_height_size - y for y in anno_obj.ycoords]
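One detail worth noting (an observation, not a change to the method above): cv2.flip maps column c of a width-W image to column W - 1 - c, so the exact mirror of a pixel x-coordinate is W - 1 - x, while the augmentation methods use W - x. The one-pixel offset is negligible for these polygon masks. A minimal sanity check:

import numpy as np
import cv2

W = 4
img = np.zeros((1, W), dtype=np.uint8)
img[0, 1] = 255                        # mark the pixel at x = 1
flipped = cv2.flip(img, 1)             # horizontal flip
assert flipped[0, W - 1 - 1] == 255    # the exact mirror lands at W - 1 - x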

Parse an annotation file

In [8]:
# test the parsing of one annotation file
test_anno_file = 'Adityap_via_project_42.csv'

img_meta_obj = img_meta()
img_meta_obj.parse(test_anno_file)
#print(type(img_meta_obj))
pprint.pprint(img_meta_obj)


img_obj = img_meta_obj.load_image()
image_shape=img_obj.shape
print(image_shape[0],image_shape[1])
#print(img_obj.shape)
print("###########")
img_meta_obj.horizontal_image_augmentation(test_anno_file, image_shape[1])  # shape is (H, W, C), so width is shape[1]
pprint.pprint(img_meta_obj)


print("###########")
img_meta_obj.vertical_image_augmentation(test_anno_file, image_shape[0])  # height is shape[0]
pprint.pprint(img_meta_obj)
img_name: aditya-42.png, annotations: 
[* label: single-colony
  xcoords: [90, 90, 89, 88, 88, 88, 89, 90, 90]
  ycoords: [73, 74, 75, 74, 73, 72, 72, 71, 73]
, * label: single-colony
  xcoords: [109, 110, 110, 110, 110, 109, 108, 108]
  ycoords: [59, 60, 60, 61, 63, 63, 61, 60]
, * label: single-colony
  xcoords: [70, 70, 69, 69, 69, 69, 69, 70, 70, 71, 72, 72, 73, 73, 73, 72, 72]
  ycoords: [168, 169, 170, 170, 171, 171, 172, 173, 173, 173, 173, 172, 171, 170, 169, 168, 168]
, * label: single-colony
  xcoords: [54, 55, 56, 56, 56, 55]
  ycoords: [156, 154, 155, 157, 158, 157]
, * label: single-colony
  xcoords: [33, 34, 33, 32, 31, 32, 33]
  ycoords: [159, 160, 162, 161, 160, 159, 159]
, * label: single-colony
  xcoords: [42, 43, 43, 44, 44, 44, 43, 42]
  ycoords: [156, 155, 154, 154, 155, 156, 157, 156]
, * label: single-colony
  xcoords: [115, 116, 117, 117, 117, 116, 115]
  ycoords: [167, 167, 168, 169, 170, 170, 170]
, * label: single-colony
  xcoords: [75, 75, 74, 75, 75, 76, 76, 77, 77, 77, 76, 75]
  ycoords: [167, 168, 169, 170, 172, 173, 172, 171, 169, 168, 167, 166]
, * label: amorphous-colony
  xcoords: [138, 138, 137, 137, 137, 136, 136, 135, 135, 135, 135, 134, 134, 135, 135, 136, 136, 136, 137, 137, 137, 137, 138, 138, 138, 139, 139, 139, 140, 140, 140, 140, 139, 139, 139, 139, 138, 138]
  ycoords: [218, 217, 216, 216, 217, 217, 218, 218, 217, 216, 215, 215, 215, 214, 214, 214, 213, 212, 212, 212, 212, 213, 214, 214, 214, 214, 214, 215, 215, 216, 216, 217, 217, 218, 218, 218, 218, 218]
, * label: single-colony
  xcoords: [82, 82, 82, 82, 83, 84, 84, 85, 85, 85, 84, 83, 83, 82, 81, 81]
  ycoords: [73, 74, 75, 76, 76, 76, 75, 75, 74, 73, 73, 72, 71, 71, 72, 73]
, * label: single-colony
  xcoords: [83, 84, 85, 85, 85, 84, 84, 83, 83, 83]
  ycoords: [77, 77, 77, 78, 78, 79, 79, 79, 78, 77]
, * label: single-colony
  xcoords: [96, 96, 96, 97, 97, 97, 97]
  ycoords: [90, 91, 92, 93, 91, 90, 90]
, * label: amorphous-colony
  xcoords: [134, 134, 134, 135, 135, 135, 134, 134, 133, 133, 132, 131, 131, 131, 131, 131, 132, 132, 133]
  ycoords: [213, 212, 212, 211, 211, 210, 210, 210, 209, 209, 209, 210, 210, 211, 212, 212, 212, 211, 211]
, * label: single-colony
  xcoords: [145, 147, 147, 148, 148, 148, 148, 147, 145, 144, 144, 144, 145]
  ycoords: [183, 184, 184, 183, 183, 181, 180, 180, 180, 180, 182, 183, 184]
, * label: amorphous-colony
  xcoords: [181, 183, 184, 184, 185, 186, 186, 185, 185, 185, 186, 187, 187, 187, 188, 188, 188, 189, 188, 188, 188, 187, 189, 189, 189, 189, 187, 187, 187, 186, 185, 184, 183, 182, 181, 181, 181, 180, 182]
  ycoords: [159, 159, 158, 156, 156, 156, 157, 158, 158, 159, 159, 159, 158, 157, 157, 157, 158, 159, 159, 160, 160, 161, 161, 161, 162, 162, 162, 162, 163, 164, 164, 164, 163, 163, 162, 161, 161, 159, 159]
, * label: amorphous-colony
  xcoords: [177, 175, 174, 173, 171, 168, 169, 171, 173, 174, 174, 175, 176, 177, 177, 177]
  ycoords: [158, 159, 160, 160, 159, 159, 157, 156, 156, 156, 157, 157, 156, 156, 157, 158]
, * label: amorphous-colony
  xcoords: [201, 203, 203, 204, 205, 204, 205, 205, 206, 207, 208, 208, 208, 208, 208, 207, 207, 206, 206, 205, 204, 204, 203, 203, 202, 201, 200, 200, 201]
  ycoords: [117, 118, 118, 118, 119, 120, 120, 120, 120, 119, 118, 118, 118, 117, 116, 116, 115, 115, 115, 115, 114, 114, 114, 115, 116, 116, 116, 117, 117]
, * label: single-colony
  xcoords: [168, 169, 169, 170, 170, 170, 170, 169, 169, 168, 168, 168]
  ycoords: [178, 178, 177, 177, 178, 179, 180, 180, 180, 180, 180, 180]
, * label: amorphous-colony
  xcoords: [247, 245, 246, 246, 247, 247, 248, 249, 249, 249, 248, 248, 248, 249, 249, 249, 249, 249, 249, 248, 248, 248, 247]
  ycoords: [147, 150, 150, 151, 151, 151, 151, 152, 151, 150, 150, 149, 149, 148, 147, 147, 147, 147, 146, 146, 146, 146, 146]
, * label: single-colony
  xcoords: [242, 242, 242, 243, 241, 241, 239, 239, 240]
  ycoords: [163, 163, 164, 165, 166, 166, 166, 164, 164]
, * label: single-colony
  xcoords: [124, 124, 125, 125, 125]
  ycoords: [207, 206, 205, 206, 207]
, * label: single-colony
  xcoords: [179, 178, 177, 177, 176, 176, 177, 178]
  ycoords: [136, 137, 137, 136, 136, 135, 135, 135]
, * label: single-colony
  xcoords: [140, 140, 140, 140, 141, 141, 141]
  ycoords: [58, 59, 60, 61, 61, 60, 58]
, * label: single-colony
  xcoords: [116, 117, 117, 118, 117, 116, 116]
  ycoords: [119, 120, 119, 118, 117, 117, 116]
, * label: single-colony
  xcoords: [65, 64, 65, 66, 67, 69, 69, 68, 66]
  ycoords: [193, 194, 195, 196, 196, 196, 195, 193, 193]
, * label: single-colony
  xcoords: [171, 172, 173, 174, 175, 176, 175, 174, 173, 171, 170]
  ycoords: [163, 163, 163, 163, 163, 165, 166, 166, 166, 165, 163]
, * label: single-colony
  xcoords: [114, 114, 115, 116, 117, 118, 118, 117]
  ycoords: [163, 164, 165, 165, 163, 163, 161, 161]
, * label: merged-colony
  xcoords: [144, 144, 144, 144, 144, 145, 145, 145, 145, 145, 145, 145, 146, 146, 146, 146, 145, 145]
  ycoords: [204, 205, 206, 207, 208, 208, 208, 207, 206, 206, 205, 205, 204, 204, 204, 203, 203, 203]
, * label: single-colony
  xcoords: [149, 148, 148, 148, 149, 149, 150, 150, 150, 150, 150]
  ycoords: [206, 207, 207, 208, 208, 208, 208, 207, 207, 206, 206]
, * label: single-colony
  xcoords: [177, 177, 177, 177, 178, 178, 178, 178, 178, 179, 179, 179, 178]
  ycoords: [67, 68, 68, 69, 69, 69, 69, 68, 68, 68, 68, 67, 67]
, * label: single-colony
  xcoords: [203, 202, 202, 202, 203, 203, 203, 204, 204, 204, 204]
  ycoords: [81, 81, 80, 80, 80, 80, 80, 80, 80, 81, 81]
, * label: single-colony
  xcoords: [203, 203, 202, 202, 202, 203, 203, 204, 204, 204]
  ycoords: [82, 82, 83, 83, 84, 84, 84, 84, 83, 82]
, * label: single-colony
  xcoords: [164, 164, 164, 164, 165, 165, 165]
  ycoords: [211, 212, 212, 213, 214, 213, 212]
, * label: single-colony
  xcoords: [79, 79, 79, 79, 79, 79, 79, 80, 80, 80, 80, 80]
  ycoords: [167, 168, 169, 169, 169, 170, 170, 171, 170, 169, 169, 169]
, * label: single-colony
  xcoords: [103, 103, 103, 102, 103, 104, 104, 104, 104, 104]
  ycoords: [193, 194, 195, 196, 196, 196, 196, 196, 195, 194]
, * label: single-colony
  xcoords: [91, 90, 90, 90, 91, 91, 92]
  ycoords: [203, 203, 204, 204, 204, 204, 203]
, * label: single-colony
  xcoords: [209, 209, 208, 208, 209, 209, 210, 210, 210, 210]
  ycoords: [80, 80, 81, 81, 82, 82, 82, 82, 81, 80]
]
292 292
###########
img_name: aditya-42.png, annotations: 
[* label: single-colony
  xcoords: [202, 202, 203, 204, 204, 204, 203, 202, 202]
  ycoords: [73, 74, 75, 74, 73, 72, 72, 71, 73]
, * label: single-colony
  xcoords: [183, 182, 182, 182, 182, 183, 184, 184]
  ycoords: [59, 60, 60, 61, 63, 63, 61, 60]
, * label: single-colony
  xcoords: [222, 222, 223, 223, 223, 223, 223, 222, 222, 221, 220, 220, 219, 219, 219, 220, 220]
  ycoords: [168, 169, 170, 170, 171, 171, 172, 173, 173, 173, 173, 172, 171, 170, 169, 168, 168]
, * label: single-colony
  xcoords: [238, 237, 236, 236, 236, 237]
  ycoords: [156, 154, 155, 157, 158, 157]
, * label: single-colony
  xcoords: [259, 258, 259, 260, 261, 260, 259]
  ycoords: [159, 160, 162, 161, 160, 159, 159]
, * label: single-colony
  xcoords: [250, 249, 249, 248, 248, 248, 249, 250]
  ycoords: [156, 155, 154, 154, 155, 156, 157, 156]
, * label: single-colony
  xcoords: [177, 176, 175, 175, 175, 176, 177]
  ycoords: [167, 167, 168, 169, 170, 170, 170]
, * label: single-colony
  xcoords: [217, 217, 218, 217, 217, 216, 216, 215, 215, 215, 216, 217]
  ycoords: [167, 168, 169, 170, 172, 173, 172, 171, 169, 168, 167, 166]
, * label: amorphous-colony
  xcoords: [154, 154, 155, 155, 155, 156, 156, 157, 157, 157, 157, 158, 158, 157, 157, 156, 156, 156, 155, 155, 155, 155, 154, 154, 154, 153, 153, 153, 152, 152, 152, 152, 153, 153, 153, 153, 154, 154]
  ycoords: [218, 217, 216, 216, 217, 217, 218, 218, 217, 216, 215, 215, 215, 214, 214, 214, 213, 212, 212, 212, 212, 213, 214, 214, 214, 214, 214, 215, 215, 216, 216, 217, 217, 218, 218, 218, 218, 218]
, * label: single-colony
  xcoords: [210, 210, 210, 210, 209, 208, 208, 207, 207, 207, 208, 209, 209, 210, 211, 211]
  ycoords: [73, 74, 75, 76, 76, 76, 75, 75, 74, 73, 73, 72, 71, 71, 72, 73]
, * label: single-colony
  xcoords: [209, 208, 207, 207, 207, 208, 208, 209, 209, 209]
  ycoords: [77, 77, 77, 78, 78, 79, 79, 79, 78, 77]
, * label: single-colony
  xcoords: [196, 196, 196, 195, 195, 195, 195]
  ycoords: [90, 91, 92, 93, 91, 90, 90]
, * label: amorphous-colony
  xcoords: [158, 158, 158, 157, 157, 157, 158, 158, 159, 159, 160, 161, 161, 161, 161, 161, 160, 160, 159]
  ycoords: [213, 212, 212, 211, 211, 210, 210, 210, 209, 209, 209, 210, 210, 211, 212, 212, 212, 211, 211]
, * label: single-colony
  xcoords: [147, 145, 145, 144, 144, 144, 144, 145, 147, 148, 148, 148, 147]
  ycoords: [183, 184, 184, 183, 183, 181, 180, 180, 180, 180, 182, 183, 184]
, * label: amorphous-colony
  xcoords: [111, 109, 108, 108, 107, 106, 106, 107, 107, 107, 106, 105, 105, 105, 104, 104, 104, 103, 104, 104, 104, 105, 103, 103, 103, 103, 105, 105, 105, 106, 107, 108, 109, 110, 111, 111, 111, 112, 110]
  ycoords: [159, 159, 158, 156, 156, 156, 157, 158, 158, 159, 159, 159, 158, 157, 157, 157, 158, 159, 159, 160, 160, 161, 161, 161, 162, 162, 162, 162, 163, 164, 164, 164, 163, 163, 162, 161, 161, 159, 159]
, * label: amorphous-colony
  xcoords: [115, 117, 118, 119, 121, 124, 123, 121, 119, 118, 118, 117, 116, 115, 115, 115]
  ycoords: [158, 159, 160, 160, 159, 159, 157, 156, 156, 156, 157, 157, 156, 156, 157, 158]
, * label: amorphous-colony
  xcoords: [91, 89, 89, 88, 87, 88, 87, 87, 86, 85, 84, 84, 84, 84, 84, 85, 85, 86, 86, 87, 88, 88, 89, 89, 90, 91, 92, 92, 91]
  ycoords: [117, 118, 118, 118, 119, 120, 120, 120, 120, 119, 118, 118, 118, 117, 116, 116, 115, 115, 115, 115, 114, 114, 114, 115, 116, 116, 116, 117, 117]
, * label: single-colony
  xcoords: [124, 123, 123, 122, 122, 122, 122, 123, 123, 124, 124, 124]
  ycoords: [178, 178, 177, 177, 178, 179, 180, 180, 180, 180, 180, 180]
, * label: amorphous-colony
  xcoords: [45, 47, 46, 46, 45, 45, 44, 43, 43, 43, 44, 44, 44, 43, 43, 43, 43, 43, 43, 44, 44, 44, 45]
  ycoords: [147, 150, 150, 151, 151, 151, 151, 152, 151, 150, 150, 149, 149, 148, 147, 147, 147, 147, 146, 146, 146, 146, 146]
, * label: single-colony
  xcoords: [50, 50, 50, 49, 51, 51, 53, 53, 52]
  ycoords: [163, 163, 164, 165, 166, 166, 166, 164, 164]
, * label: single-colony
  xcoords: [168, 168, 167, 167, 167]
  ycoords: [207, 206, 205, 206, 207]
, * label: single-colony
  xcoords: [113, 114, 115, 115, 116, 116, 115, 114]
  ycoords: [136, 137, 137, 136, 136, 135, 135, 135]
, * label: single-colony
  xcoords: [152, 152, 152, 152, 151, 151, 151]
  ycoords: [58, 59, 60, 61, 61, 60, 58]
, * label: single-colony
  xcoords: [176, 175, 175, 174, 175, 176, 176]
  ycoords: [119, 120, 119, 118, 117, 117, 116]
, * label: single-colony
  xcoords: [227, 228, 227, 226, 225, 223, 223, 224, 226]
  ycoords: [193, 194, 195, 196, 196, 196, 195, 193, 193]
, * label: single-colony
  xcoords: [121, 120, 119, 118, 117, 116, 117, 118, 119, 121, 122]
  ycoords: [163, 163, 163, 163, 163, 165, 166, 166, 166, 165, 163]
, * label: single-colony
  xcoords: [178, 178, 177, 176, 175, 174, 174, 175]
  ycoords: [163, 164, 165, 165, 163, 163, 161, 161]
, * label: merged-colony
  xcoords: [148, 148, 148, 148, 148, 147, 147, 147, 147, 147, 147, 147, 146, 146, 146, 146, 147, 147]
  ycoords: [204, 205, 206, 207, 208, 208, 208, 207, 206, 206, 205, 205, 204, 204, 204, 203, 203, 203]
, * label: single-colony
  xcoords: [143, 144, 144, 144, 143, 143, 142, 142, 142, 142, 142]
  ycoords: [206, 207, 207, 208, 208, 208, 208, 207, 207, 206, 206]
, * label: single-colony
  xcoords: [115, 115, 115, 115, 114, 114, 114, 114, 114, 113, 113, 113, 114]
  ycoords: [67, 68, 68, 69, 69, 69, 69, 68, 68, 68, 68, 67, 67]
, * label: single-colony
  xcoords: [89, 90, 90, 90, 89, 89, 89, 88, 88, 88, 88]
  ycoords: [81, 81, 80, 80, 80, 80, 80, 80, 80, 81, 81]
, * label: single-colony
  xcoords: [89, 89, 90, 90, 90, 89, 89, 88, 88, 88]
  ycoords: [82, 82, 83, 83, 84, 84, 84, 84, 83, 82]
, * label: single-colony
  xcoords: [128, 128, 128, 128, 127, 127, 127]
  ycoords: [211, 212, 212, 213, 214, 213, 212]
, * label: single-colony
  xcoords: [213, 213, 213, 213, 213, 213, 213, 212, 212, 212, 212, 212]
  ycoords: [167, 168, 169, 169, 169, 170, 170, 171, 170, 169, 169, 169]
, * label: single-colony
  xcoords: [189, 189, 189, 190, 189, 188, 188, 188, 188, 188]
  ycoords: [193, 194, 195, 196, 196, 196, 196, 196, 195, 194]
, * label: single-colony
  xcoords: [201, 202, 202, 202, 201, 201, 200]
  ycoords: [203, 203, 204, 204, 204, 204, 203]
, * label: single-colony
  xcoords: [83, 83, 84, 84, 83, 83, 82, 82, 82, 82]
  ycoords: [80, 80, 81, 81, 82, 82, 82, 82, 81, 80]
]
###########
img_name: aditya-42.png, annotations: 
[* label: single-colony
  xcoords: [90, 90, 89, 88, 88, 88, 89, 90, 90]
  ycoords: [219, 218, 217, 218, 219, 220, 220, 221, 219]
, * label: single-colony
  xcoords: [109, 110, 110, 110, 110, 109, 108, 108]
  ycoords: [233, 232, 232, 231, 229, 229, 231, 232]
, * label: single-colony
  xcoords: [70, 70, 69, 69, 69, 69, 69, 70, 70, 71, 72, 72, 73, 73, 73, 72, 72]
  ycoords: [124, 123, 122, 122, 121, 121, 120, 119, 119, 119, 119, 120, 121, 122, 123, 124, 124]
, * label: single-colony
  xcoords: [54, 55, 56, 56, 56, 55]
  ycoords: [136, 138, 137, 135, 134, 135]
, * label: single-colony
  xcoords: [33, 34, 33, 32, 31, 32, 33]
  ycoords: [133, 132, 130, 131, 132, 133, 133]
, * label: single-colony
  xcoords: [42, 43, 43, 44, 44, 44, 43, 42]
  ycoords: [136, 137, 138, 138, 137, 136, 135, 136]
, * label: single-colony
  xcoords: [115, 116, 117, 117, 117, 116, 115]
  ycoords: [125, 125, 124, 123, 122, 122, 122]
, * label: single-colony
  xcoords: [75, 75, 74, 75, 75, 76, 76, 77, 77, 77, 76, 75]
  ycoords: [125, 124, 123, 122, 120, 119, 120, 121, 123, 124, 125, 126]
, * label: amorphous-colony
  xcoords: [138, 138, 137, 137, 137, 136, 136, 135, 135, 135, 135, 134, 134, 135, 135, 136, 136, 136, 137, 137, 137, 137, 138, 138, 138, 139, 139, 139, 140, 140, 140, 140, 139, 139, 139, 139, 138, 138]
  ycoords: [74, 75, 76, 76, 75, 75, 74, 74, 75, 76, 77, 77, 77, 78, 78, 78, 79, 80, 80, 80, 80, 79, 78, 78, 78, 78, 78, 77, 77, 76, 76, 75, 75, 74, 74, 74, 74, 74]
, * label: single-colony
  xcoords: [82, 82, 82, 82, 83, 84, 84, 85, 85, 85, 84, 83, 83, 82, 81, 81]
  ycoords: [219, 218, 217, 216, 216, 216, 217, 217, 218, 219, 219, 220, 221, 221, 220, 219]
, * label: single-colony
  xcoords: [83, 84, 85, 85, 85, 84, 84, 83, 83, 83]
  ycoords: [215, 215, 215, 214, 214, 213, 213, 213, 214, 215]
, * label: single-colony
  xcoords: [96, 96, 96, 97, 97, 97, 97]
  ycoords: [202, 201, 200, 199, 201, 202, 202]
, * label: amorphous-colony
  xcoords: [134, 134, 134, 135, 135, 135, 134, 134, 133, 133, 132, 131, 131, 131, 131, 131, 132, 132, 133]
  ycoords: [79, 80, 80, 81, 81, 82, 82, 82, 83, 83, 83, 82, 82, 81, 80, 80, 80, 81, 81]
, * label: single-colony
  xcoords: [145, 147, 147, 148, 148, 148, 148, 147, 145, 144, 144, 144, 145]
  ycoords: [109, 108, 108, 109, 109, 111, 112, 112, 112, 112, 110, 109, 108]
, * label: amorphous-colony
  xcoords: [181, 183, 184, 184, 185, 186, 186, 185, 185, 185, 186, 187, 187, 187, 188, 188, 188, 189, 188, 188, 188, 187, 189, 189, 189, 189, 187, 187, 187, 186, 185, 184, 183, 182, 181, 181, 181, 180, 182]
  ycoords: [133, 133, 134, 136, 136, 136, 135, 134, 134, 133, 133, 133, 134, 135, 135, 135, 134, 133, 133, 132, 132, 131, 131, 131, 130, 130, 130, 130, 129, 128, 128, 128, 129, 129, 130, 131, 131, 133, 133]
, * label: amorphous-colony
  xcoords: [177, 175, 174, 173, 171, 168, 169, 171, 173, 174, 174, 175, 176, 177, 177, 177]
  ycoords: [134, 133, 132, 132, 133, 133, 135, 136, 136, 136, 135, 135, 136, 136, 135, 134]
, * label: amorphous-colony
  xcoords: [201, 203, 203, 204, 205, 204, 205, 205, 206, 207, 208, 208, 208, 208, 208, 207, 207, 206, 206, 205, 204, 204, 203, 203, 202, 201, 200, 200, 201]
  ycoords: [175, 174, 174, 174, 173, 172, 172, 172, 172, 173, 174, 174, 174, 175, 176, 176, 177, 177, 177, 177, 178, 178, 178, 177, 176, 176, 176, 175, 175]
, * label: single-colony
  xcoords: [168, 169, 169, 170, 170, 170, 170, 169, 169, 168, 168, 168]
  ycoords: [114, 114, 115, 115, 114, 113, 112, 112, 112, 112, 112, 112]
, * label: amorphous-colony
  xcoords: [247, 245, 246, 246, 247, 247, 248, 249, 249, 249, 248, 248, 248, 249, 249, 249, 249, 249, 249, 248, 248, 248, 247]
  ycoords: [145, 142, 142, 141, 141, 141, 141, 140, 141, 142, 142, 143, 143, 144, 145, 145, 145, 145, 146, 146, 146, 146, 146]
, * label: single-colony
  xcoords: [242, 242, 242, 243, 241, 241, 239, 239, 240]
  ycoords: [129, 129, 128, 127, 126, 126, 126, 128, 128]
, * label: single-colony
  xcoords: [124, 124, 125, 125, 125]
  ycoords: [85, 86, 87, 86, 85]
, * label: single-colony
  xcoords: [179, 178, 177, 177, 176, 176, 177, 178]
  ycoords: [156, 155, 155, 156, 156, 157, 157, 157]
, * label: single-colony
  xcoords: [140, 140, 140, 140, 141, 141, 141]
  ycoords: [234, 233, 232, 231, 231, 232, 234]
, * label: single-colony
  xcoords: [116, 117, 117, 118, 117, 116, 116]
  ycoords: [173, 172, 173, 174, 175, 175, 176]
, * label: single-colony
  xcoords: [65, 64, 65, 66, 67, 69, 69, 68, 66]
  ycoords: [99, 98, 97, 96, 96, 96, 97, 99, 99]
, * label: single-colony
  xcoords: [171, 172, 173, 174, 175, 176, 175, 174, 173, 171, 170]
  ycoords: [129, 129, 129, 129, 129, 127, 126, 126, 126, 127, 129]
, * label: single-colony
  xcoords: [114, 114, 115, 116, 117, 118, 118, 117]
  ycoords: [129, 128, 127, 127, 129, 129, 131, 131]
, * label: merged-colony
  xcoords: [144, 144, 144, 144, 144, 145, 145, 145, 145, 145, 145, 145, 146, 146, 146, 146, 145, 145]
  ycoords: [88, 87, 86, 85, 84, 84, 84, 85, 86, 86, 87, 87, 88, 88, 88, 89, 89, 89]
, * label: single-colony
  xcoords: [149, 148, 148, 148, 149, 149, 150, 150, 150, 150, 150]
  ycoords: [86, 85, 85, 84, 84, 84, 84, 85, 85, 86, 86]
, * label: single-colony
  xcoords: [177, 177, 177, 177, 178, 178, 178, 178, 178, 179, 179, 179, 178]
  ycoords: [225, 224, 224, 223, 223, 223, 223, 224, 224, 224, 224, 225, 225]
, * label: single-colony
  xcoords: [203, 202, 202, 202, 203, 203, 203, 204, 204, 204, 204]
  ycoords: [211, 211, 212, 212, 212, 212, 212, 212, 212, 211, 211]
, * label: single-colony
  xcoords: [203, 203, 202, 202, 202, 203, 203, 204, 204, 204]
  ycoords: [210, 210, 209, 209, 208, 208, 208, 208, 209, 210]
, * label: single-colony
  xcoords: [164, 164, 164, 164, 165, 165, 165]
  ycoords: [81, 80, 80, 79, 78, 79, 80]
, * label: single-colony
  xcoords: [79, 79, 79, 79, 79, 79, 79, 80, 80, 80, 80, 80]
  ycoords: [125, 124, 123, 123, 123, 122, 122, 121, 122, 123, 123, 123]
, * label: single-colony
  xcoords: [103, 103, 103, 102, 103, 104, 104, 104, 104, 104]
  ycoords: [99, 98, 97, 96, 96, 96, 96, 96, 97, 98]
, * label: single-colony
  xcoords: [91, 90, 90, 90, 91, 91, 92]
  ycoords: [89, 89, 88, 88, 88, 88, 89]
, * label: single-colony
  xcoords: [209, 209, 208, 208, 209, 209, 210, 210, 210, 210]
  ycoords: [212, 212, 211, 211, 210, 210, 210, 210, 211, 212]
]
In [9]:
#pprint.pprint(img_meta_obj)
In [10]:
img_obj = img_meta_obj.load_image()
image_shape=img_obj.shape
print(image_shape[0],image_shape[1])
print(img_obj.shape)
visualize.display_images([img_obj])
292 292
(292, 292, 3)
In [11]:
#img_obj_vertical_flip = cv2.flip(img_obj, 0)
#visualize.display_images([img_obj_vertical_flip])  #show the vertically flipped image 

#img_obj_horizontal_flip = cv2.flip(img_obj, 1)
#visualize.display_images([img_obj_horizontal_flip])  #show the horizontally flipped image

Parse all annotation files

In [12]:
img_metas = []
file_name=[]

for file in os.listdir(anno_path):
    if file.endswith('.csv'):
        print('Parsing', file, '...')
        img_meta_obj = img_meta()
        file_name.append(file)
        img_meta_obj.parse(file)
        img_metas.append(img_meta_obj)
    else:
        print('[WARNING] Non-CSV file detected:', file)
Parsing Adityap_via_project_42.csv ...
Parsing ElSafadi-40.csv ...
Parsing Kedhar_via_project_58.csv ...
Parsing Harish_via_project_44.csv ...
Parsing Yuji_via_project_89.csv ...
Parsing Pagare-58.csv ...
Parsing Doppalapudi-20.csv ...
Parsing bonifacesindala_via_project_77.csv ...
Parsing Lee-32.csv ...
Parsing Kolla-30.csv ...
Parsing Pravasini_via_project_66.csv ...
Parsing sagar_via_project_43.csv ...
Parsing Bais-9.csv ...
Parsing Jing_via_project_74.csv ...
Parsing Bachamolla-25.csv ...
Parsing Alshahrani-21.csv ...
Parsing Madhu_via_project_55.csv ...
Parsing sagar_via_project_61 .csv ...
Parsing Trenton_via_project_65.csv ...
Parsing Chen-15.csv ...
Parsing Kolla-48.csv ...
Parsing sandeep_via_project_67.csv ...
Parsing Kedhar_via_project_76.csv ...
Parsing Brian-56.csv ...
Parsing Pravasini_via_project_48.csv ...
Parsing Brendan_via_project_66.csv ...
Parsing santosh_via_project_46.csv ...
Parsing santosh_via_project_64.csv ...
Parsing bonifacesindala_via_project_59.csv ...
Parsing Piyush_via_project_60.csv ...
Parsing Nreddy-55.csv ...
Parsing Kang-47.csv ...
Parsing Bachamolla-7.csv ...
Parsing Brian-38.csv ...
Parsing Piyush_via_project_78.csv ...
Parsing Dorasala-39 (jpg).csv ...
Parsing He-28.csv ...
Parsing Donga-19.csv ...
Parsing Jing_via_project_92.csv ...
Parsing Divvela-18.csv ...
Parsing He-46.csv ...
Parsing Foote-41.csv ...
Parsing Adityap_via_project_60.csv ...
Parsing Sucic_via_project_61.csv ...
Parsing Saylee_via_project_70 .csv ...
Parsing Agberebi-19.csv ...
Parsing Chen-33.csv ...
Parsing Agberebi-1.csv ...
Parsing Doppalapudi-38.csv ...
Parsing Pagare-40.csv ...
Parsing hamid_via_project_88.csv ...
Parsing Gundlapally-44.csv ...
Parsing Gundlapally-26.csv ...
Parsing Amulya_via_project_85 .csv ...
Parsing Krutul-31.csv ...
Parsing Saylee_via_project_52.csv ...
Parsing Foote-23.csv ...
Parsing Trenton_via_project_83.csv ...
Parsing Madhu_via_project_73.csv ...
Parsing Guvvala-27.csv ...
Parsing OLADRI-39.csv ...
Parsing Guvvala-45.csv ...
Parsing Ran Yan_via_project_76.csv ...
Parsing Glass-24.csv ...
Parsing Alshahrani-3.csv ...
Parsing Harish_via_project_62.csv ...
Parsing sandeep_via_project_49.csv ...
Parsing Brendan_via_project_84 .csv ...
Parsing Devanshi_via_project_63.csv ...
Parsing Ran Yan_via_project_94.csv ...
Parsing Sucic_via_project_79.csv ...
Parsing Lee-50.csv ...
Parsing Amulya_via_project_67.csv ...
Parsing Glass-42.csv ...
Parsing Divvela-36.csv ...
Parsing Yuji_via_project_71.csv ...
Parsing Krutul-49.csv ...
Parsing OLADRI-57.csv ...
Parsing Bais-27.csv ...
Parsing Dorasala-21 (jpg).csv ...
Parsing ElSafadi-22.csv ...
Parsing hamid_via_project_70.csv ...
Parsing Devanshi_via_project_45.csv ...
In [13]:
print(len(img_metas))
83
In [14]:
print(file_name[0])
Adityap_via_project_42.csv

Visualization

In [15]:
def load_mask(img_file, img_meta_obj):
    
    h = img_file.shape[0]
    w = img_file.shape[1]
    #print(w)
    #print(h)
    
    annotations = img_meta_obj.annotations
    #print(annotations)
    
    # create one array for all masks, each on a different channel
    masks = np.zeros([h, w, len(annotations)], dtype='uint8')
    #print(masks)
    
    #Draw polygons in empty mask
    for i, r in enumerate(annotations):
        
        class_label = r.label
        all_points_x = r.xcoords
        all_points_y = r.ycoords
        
        #assert (len(all_points_x) == len(all_points_y)), 'all_points_x != all_points_y'
        rr, cc = skimage.draw.polygon(all_points_y, all_points_x)
        masks[rr, cc, i] = 1
        
    return masks
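For intuition, here is a tiny standalone illustration (not part of the pipeline) of how skimage.draw.polygon fills a polygon given row and column coordinates:

import numpy as np
import skimage.draw

toy = np.zeros((5, 5), dtype='uint8')
rr, cc = skimage.draw.polygon([0, 0, 4], [0, 4, 0])  # row coords, then column coords
toy[rr, cc] = 1
print(toy)  # a filled triangle in the upper-left corner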
In [16]:
# Visualize a sample file

#sample_idx = np.random.choice(len(img_metas))
sample_idx = 1
print("Sample Index: ", sample_idx)

#Image Meta Data
img_meta_obj = img_metas[sample_idx]
print("Number of annotations: ", len(img_meta_obj.annotations))

img_file = img_meta_obj.load_image()
print("Image shape: ", img_file.shape)

masks = load_mask(img_file, img_meta_obj)
print("Mask shape: ", masks.shape)

visualize.display_masked_instances(img_file, img_meta_obj.annotations, masks)
Sample Index:  1
Number of annotations:  21
Image shape:  (266, 273, 3)
Mask shape:  (266, 273, 21)
In [17]:
# Visualize a file together with its flipped augmentations

#sample_idx = np.random.choice(len(img_metas))
for sample_idx in range(0, 1):
#sample_idx = 1
    print("Sample Index: ", sample_idx)
    img_meta_obj = img_meta()
    img_meta_obj.parse(file_name[sample_idx])
    #Image Meta Data
    #img_meta_obj = img_metas[sample_idx]
    print("Number of annotations: ", len(img_meta_obj.annotations))

    img_file = img_meta_obj.load_image()
    print("Image shape: ", img_file.shape)

    masks = load_mask(img_file, img_meta_obj)
    print("Mask shape: ", masks.shape)

    visualize.display_masked_instances(img_file, img_meta_obj.annotations, masks)
    # test the parsing of one annotation file
    #test_anno_file = 'Adityap_via_project_42.csv'


    image_shape=img_file.shape
    img_obj_vertical_flip = cv2.flip(img_file, 0)
    #visualize.display_images([img_obj_vertical_flip])  #show the vertically flipped image 

    img_obj_horizontal_flip = cv2.flip(img_file, 1)
    #visualize.display_images([img_obj_horizontal_flip])  #show the horizontally flipped image
    print(image_shape[0],image_shape[1])
    #print(img_obj.shape)
    print("###########")
    img_meta_obj_horizontal = img_meta()
    img_meta_obj_horizontal.horizontal_image_augmentation(file_name[sample_idx],image_shape[1])
    #pprint.pprint(img_meta_obj)

    #sample_idx = 2
    #print("Sample Index: ", sample_idx)

    #Image Meta Data
    #img_meta_obj = img_metas[sample_idx]
    print("Number of annotations: ", len(img_meta_obj_horizontal.annotations))

    img_file = img_meta_obj_horizontal.load_image()  # reloaded only to report the shape
    print("Image shape: ", img_file.shape)

    masks_hori = load_mask(img_obj_horizontal_flip, img_meta_obj_horizontal)
    print("Mask shape: ", masks_hori.shape)
    print("sample index=",sample_idx," after horizontal flipping")
    visualize.display_masked_instances(img_obj_horizontal_flip, img_meta_obj_horizontal.annotations, masks_hori)

    print("###########")
    img_meta_obj_vertical = img_meta()
    img_meta_obj_vertical.vertical_image_augmentation(file_name[sample_idx],image_shape[0])
    #pprint.pprint(img_meta_obj)
    masks_verti = load_mask(img_obj_vertical_flip, img_meta_obj_vertical)
    print("Mask shape: ", masks_verti.shape)
    print("sample index=",sample_idx,"after vertical flipping")
    visualize.display_masked_instances(img_obj_vertical_flip, img_meta_obj_vertical.annotations, masks_verti)
Sample Index:  0
Number of annotations:  37
Image shape:  (292, 292, 3)
Mask shape:  (292, 292, 37)
292 292
###########
Number of annotations:  37
Image shape:  (292, 292, 3)
Mask shape:  (292, 292, 37)
sample index= 0  after horizontal flipping
###########
Mask shape:  (292, 292, 37)
sample index= 0 after vertical flipping

FCN Demo

Data Processing

In [18]:
# Image size that we are going to use
IMG_SIZE = 256
# Our images are RGB (3 channels)
N_CHANNELS = 3
# Foreground and background
N_CLASSES = 2
In [19]:
#Loads the data and creates a tuple of (image list, mask list)
#to be later made into a TensorFlow Dataset object.
#Each annotation file contributes three samples: the original image,
#a horizontally flipped copy, and a vertically flipped copy.

def dataLoader():
    img_list = []
    mask_list = []

    def preprocess(img_file, meta_obj):
        # Load the per-instance masks and merge them into one mask
        masks = load_mask(img_file, meta_obj)
        mask = np.sum(masks, axis = -1)
        # Convert mask to the shape of an image (3D) with channel=1
        mask = np.expand_dims(mask, axis = -1)

        # Resize mask and image to a common size
        # Resize Image to 256x256
        img_file, _, scale, padding, _ = my_utils.resize_image(img_file, max_dim = IMG_SIZE, mode="square")
        # Resize Mask to 256x256
        mask = my_utils.resize_mask(mask, scale, padding)
        # Fix overlap bug: clip overlapping instances back to binary values
        mask = np.where(mask > 1, 1, mask)
        mask = mask.astype(np.float32)

        # Normalize image to [0, 1]
        img_file = img_file.astype(np.float32)
        img_file = img_file / 255.0

        img_list.append(img_file)
        mask_list.append(mask)

    for i in range(len(img_metas)):
        img_meta_obj = img_metas[i]
        img_file = img_meta_obj.load_image()
        image_shape = img_file.shape

        # Original image
        preprocess(img_file, img_meta_obj)

        # Horizontal flip: flip the image and mirror the annotation x-coordinates
        img_meta_obj_horizontal = img_meta()
        img_meta_obj_horizontal.horizontal_image_augmentation(file_name[i], image_shape[1])
        preprocess(cv2.flip(img_file, 1), img_meta_obj_horizontal)

        # Vertical flip: flip the image and mirror the annotation y-coordinates
        img_meta_obj_vertical = img_meta()
        img_meta_obj_vertical.vertical_image_augmentation(file_name[i], image_shape[0])
        preprocess(cv2.flip(img_file, 0), img_meta_obj_vertical)

    return (img_list, mask_list)

Create TensorFlow Data Loader

In [20]:
dataset = tf.data.Dataset.from_tensor_slices(dataLoader())
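As an optional sanity check (not part of the original run), the dataset's element spec can be inspected to confirm the image and mask shapes:

print(dataset.element_spec)
# Expected: (TensorSpec(shape=(256, 256, 3), dtype=tf.float32, ...),
#            TensorSpec(shape=(256, 256, 1), dtype=tf.float32, ...))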

Train/Validation Split

In [21]:
train_size = int(0.9 * len(img_metas))
val_size = int(0.1 * len(img_metas))

train_dataset = dataset.take(train_size)
val_dataset = dataset.skip(train_size)
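Note that dataLoader() yields three samples per annotation file (original plus two flips), so the dataset holds 3 * len(img_metas) elements while the split above is computed over the original file count; everything past the first train_size elements lands in val_dataset. A hypothetical variant that splits over the full augmented set would look like this (a sketch only; the runs below use the split above):

n_total = 3 * len(img_metas)           # original + horizontal + vertical flips
train_size_aug = int(0.9 * n_total)    # hypothetical names, not used below
val_size_aug = n_total - train_size_aug

train_dataset_aug = dataset.take(train_size_aug)
val_dataset_aug = dataset.skip(train_size_aug)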

Training

The following training code and model code were adapted from

https://yann-leguilly.gitlab.io/post/2019-12-14-tensorflow-tfdata-segmentation/

Data Tuning Parameters

In [22]:
BATCH_SIZE = 5
BUFFER_SIZE = 1000
# important for reproducibility:
# this allows the same random numbers to be generated on each run
# (SEED and BUFFER_SIZE are only used by the shuffle call, commented out below)
SEED = 42
AUTOTUNE = tf.data.experimental.AUTOTUNE
In [23]:
dataset = {"train": train_dataset, "val": val_dataset}

# -- Train Dataset --#
#dataset['train'] = dataset['train'].shuffle(buffer_size=BUFFER_SIZE, seed=SEED)
dataset['train'] = dataset['train'].repeat()
dataset['train'] = dataset['train'].batch(BATCH_SIZE)
dataset['train'] = dataset['train'].prefetch(buffer_size=AUTOTUNE)

#-- Validation Dataset --#
dataset['val'] = dataset['val'].repeat()
dataset['val'] = dataset['val'].batch(BATCH_SIZE)
dataset['val'] = dataset['val'].prefetch(buffer_size=AUTOTUNE)

print(dataset['train'])
print(dataset['val'])

# how shuffle works: https://stackoverflow.com/a/53517848
<PrefetchDataset shapes: ((None, 256, 256, 3), (None, 256, 256, 1)), types: (tf.float32, tf.float32)>
<PrefetchDataset shapes: ((None, 256, 256, 3), (None, 256, 256, 1)), types: (tf.float32, tf.float32)>
In [24]:
def display_sample(display_list):
    """Show side-by-side an input image,
    the ground truth and the prediction.
    """
    plt.figure(figsize=(18, 18))

    title = ['Input Image', 'True Mask', 'Predicted Mask']

    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i+1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
        plt.axis('off')
    plt.show()
In [25]:
for image, mask in dataset['train'].take(1):
    sample_image, sample_mask = image, mask

display_sample([sample_image[0], sample_mask[0]])
In [26]:
import tensorflow as tf

#def FCN_model(len_classes=5, dropout_rate=0.2):
dropout_rate = 0.5
input_size = (IMG_SIZE, IMG_SIZE, N_CHANNELS)

inputs = Input(shape=input_size)    
#input = tf.keras.layers.Input(shape=(None, None, 3))
len_classes=2
#dropout_rate=0.2
x = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=1)(inputs)
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)

# x = tf.keras.layers.MaxPooling2D()(x)

x = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1)(x)
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)

# x = tf.keras.layers.MaxPooling2D()(x)

x = tf.keras.layers.Conv2D(filters=128, kernel_size=3, strides=2)(x)
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)

# x = tf.keras.layers.MaxPooling2D()(x)

x = tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=2)(x)
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)

# x = tf.keras.layers.MaxPooling2D()(x)

x = tf.keras.layers.Conv2D(filters=512, kernel_size=3, strides=2)(x)
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)

# Uncomment the below line if you're using dense layers
# x = tf.keras.layers.GlobalMaxPooling2D()(x)

# Fully connected layer 1
# x = tf.keras.layers.Dropout(dropout_rate)(x)
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Dense(units=64)(x)
# x = tf.keras.layers.Activation('relu')(x)

# Fully connected layer 1
x = tf.keras.layers.Conv2D(filters=64, kernel_size=1, strides=1)(x)
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)

# Fully connected layer 2
# x = tf.keras.layers.Dropout(dropout_rate)(x)
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Dense(units=len_classes)(x)
# predictions = tf.keras.layers.Activation('softmax')(x)

# Fully connected layer 2
x = tf.keras.layers.Conv2D(filters=len_classes, kernel_size=1, strides=1)(x)
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.GlobalMaxPooling2D()(x)
predictions = tf.keras.layers.Activation('softmax')(x)
output=predictions


#model = tf.keras.Model(inputs=inputs, outputs=predictions)

#print(model.summary())
#print(f'Total number of layers: {len(model.layers)}')

   # return model

#if __name__ == "__main__":
   # FCN_model(len_classes=5, dropout_rate=0.2)
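Note: this cell only builds a graph of layers; the `inputs` and `output` defined here are superseded by the FC-DenseNet cell below, which redefines both before the model is created and compiled. Also, the GlobalMaxPooling2D plus softmax head shown here collapses the spatial dimensions, so it is an image-classification head rather than a per-pixel segmentation head.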
In [27]:
# -- Keras Functional API -- #
# -- FC-DenseNet Implementation -- #
# Everything here is from tensorflow.keras.layers;
# `from tensorflow.keras.layers import *` was used above to make this easier to read.
dropout_rate = 0.2
input_size = (IMG_SIZE, IMG_SIZE, N_CHANNELS)

# If you want to know more about why we are using `he_normal`:
# https://stats.stackexchange.com/questions/319323/whats-the-difference-between-variance-scaling-initializer-and-xavier-initialize/319849#319849
# Or the excellent fastai course:
# https://github.com/fastai/course-v3/blob/master/nbs/dl2/02b_initializing.ipynb
initializer = 'he_normal'


# -- Top Down -- #

inputs = Input(shape=input_size)
conv_enc_1 = Conv2D(256, 3,  padding='same', kernel_initializer=initializer)(inputs)

# layer 1
x1=BatchNormalization()(conv_enc_1)
x1 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x1)
x1 = tf.keras.layers.Dropout(dropout_rate)(x1)

concatenate_1 = concatenate([x1, conv_enc_1], axis = 3)


# Transition Down 1
td_x1=BatchNormalization()(concatenate_1)
td_x1 = Conv2D(256, 1, activation = 'relu', padding='same', kernel_initializer=initializer)(td_x1)
td_x1 = tf.keras.layers.Dropout(dropout_rate)(td_x1)
max_pool_enc_1 = MaxPooling2D(pool_size=(2, 2))(td_x1)

# layer 2
x2=BatchNormalization()(max_pool_enc_1)
x2 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x2)
x2 = tf.keras.layers.Dropout(dropout_rate)(x2)


concatenate_2 = concatenate([x2, max_pool_enc_1], axis = 3)


# Transition Down 2
td_x2=BatchNormalization()(concatenate_2)
td_x2 = Conv2D(256, 1, activation = 'relu', padding='same', kernel_initializer=initializer)(td_x2)
td_x2 = tf.keras.layers.Dropout(dropout_rate)(td_x2)
max_pool_enc_2 = MaxPooling2D(pool_size=(2, 2))(td_x2)



# layer 3
x3=BatchNormalization()(max_pool_enc_2)
x3 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x3)
x3 = tf.keras.layers.Dropout(dropout_rate)(x3)



#Transition UP 1
tu_x1 = Conv2DTranspose(256, 3, strides=(2,2), padding='same', kernel_initializer=initializer)(x3)


# layer 4
x4=BatchNormalization()(tu_x1)
x4 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x4)
x4 = tf.keras.layers.Dropout(dropout_rate)(x4)


#Transition UP 2
tu_x2 = Conv2DTranspose(256, 3, strides=(2,2), padding='same', kernel_initializer=initializer)(x4)

# layer 5
x5=BatchNormalization()(tu_x2)
x5 = Conv2D(256, 3, activation = 'relu', padding='same', kernel_initializer=initializer)(x5)
x5 = tf.keras.layers.Dropout(dropout_rate)(x5)

output = Conv2D(256, 3,  padding='same', kernel_initializer=initializer)(x5)



#output = Conv2D(N_CLASSES, 1, activation = 'softmax')(conv_dec_4)
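Note: as written, the final layer above leaves `output` with 256 channels; the commented line shows the intended N_CLASSES softmax head. The SparseCategoricalCrossentropy loss used below simply treats the 256 channels as per-pixel class scores, of which only labels 0 and 1 ever occur in the masks.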

Model Saver Class

In [28]:
#This class will automatically save TF models
#at epochs that are multiples of the SAVE_MULTIPLE parameter.
SAVE_MULTIPLE = 5

class ModelSaver(tf.keras.callbacks.Callback):

    def on_epoch_end(self, epoch, logs=None):
        if epoch % SAVE_MULTIPLE == 0:  # save when the (0-based) epoch index is a multiple of SAVE_MULTIPLE
            self.model.save(f"./saved_models/model_{epoch}.h5")
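For reference, the ModelCheckpoint callback imported at the top of the notebook offers a built-in alternative, though it saves on a per-epoch (or per-batch) schedule rather than every SAVE_MULTIPLE epochs. A sketch, not used in the runs below:

checkpoint = ModelCheckpoint('./saved_models/model_{epoch}.h5', save_freq='epoch')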

Create and Compile Model

In [29]:
model = tf.keras.Model(inputs = inputs, outputs = output)
In [31]:
model.compile(optimizer=Adam(learning_rate=0.0001), loss = tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

Model Summary

In [32]:
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_2 (InputLayer)            [(None, 256, 256, 3) 0                                            
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 256, 256, 256 7168        input_2[0][0]                    
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 256, 256, 256 1024        conv2d_7[0][0]                   
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 256, 256, 256 590080      batch_normalization_7[0][0]      
__________________________________________________________________________________________________
dropout_7 (Dropout)             (None, 256, 256, 256 0           conv2d_8[0][0]                   
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 256, 256, 512 0           dropout_7[0][0]                  
                                                                 conv2d_7[0][0]                   
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 256, 256, 512 2048        concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 256, 256, 256 131328      batch_normalization_8[0][0]      
__________________________________________________________________________________________________
dropout_8 (Dropout)             (None, 256, 256, 256 0           conv2d_9[0][0]                   
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 128, 128, 256 0           dropout_8[0][0]                  
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 128, 128, 256 1024        max_pooling2d[0][0]              
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 128, 128, 256 590080      batch_normalization_9[0][0]      
__________________________________________________________________________________________________
dropout_9 (Dropout)             (None, 128, 128, 256 0           conv2d_10[0][0]                  
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 128, 128, 512 0           dropout_9[0][0]                  
                                                                 max_pooling2d[0][0]              
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 128, 128, 512 2048        concatenate_1[0][0]              
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 128, 128, 256 131328      batch_normalization_10[0][0]     
__________________________________________________________________________________________________
dropout_10 (Dropout)            (None, 128, 128, 256 0           conv2d_11[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 64, 64, 256)  0           dropout_10[0][0]                 
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 64, 64, 256)  1024        max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 64, 64, 256)  590080      batch_normalization_11[0][0]     
__________________________________________________________________________________________________
dropout_11 (Dropout)            (None, 64, 64, 256)  0           conv2d_12[0][0]                  
__________________________________________________________________________________________________
conv2d_transpose (Conv2DTranspo (None, 128, 128, 256 590080      dropout_11[0][0]                 
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 128, 128, 256 1024        conv2d_transpose[0][0]           
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 128, 128, 256 590080      batch_normalization_12[0][0]     
__________________________________________________________________________________________________
dropout_12 (Dropout)            (None, 128, 128, 256 0           conv2d_13[0][0]                  
__________________________________________________________________________________________________
conv2d_transpose_1 (Conv2DTrans (None, 256, 256, 256 590080      dropout_12[0][0]                 
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 256, 256, 256 1024        conv2d_transpose_1[0][0]         
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 256, 256, 256 590080      batch_normalization_13[0][0]     
__________________________________________________________________________________________________
dropout_13 (Dropout)            (None, 256, 256, 256 0           conv2d_14[0][0]                  
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 256, 256, 256 590080      dropout_13[0][0]                 
==================================================================================================
Total params: 4,999,680
Trainable params: 4,995,072
Non-trainable params: 4,608
__________________________________________________________________________________________________
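As a quick check of the summary, the first convolution's parameter count works out as expected: conv2d_7 applies 3x3 kernels over 3 input channels to produce 256 filters, giving 3 * 3 * 3 * 256 + 256 biases = 7,168 parameters, matching the table above.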

Create model saver instance

In [31]:
# create and use callback:
saver = ModelSaver()

Training Hyperparameters

In [32]:
EPOCHS = 250

STEPS_PER_EPOCH = train_size // BATCH_SIZE
VALIDATION_STEPS = val_size // BATCH_SIZE
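With the 83 annotation files parsed above, train_size = int(0.9 * 83) = 74 and val_size = 8, so STEPS_PER_EPOCH = 74 // 5 = 14 (matching the 14/14 progress bars below) and VALIDATION_STEPS = 8 // 5 = 1.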

Start Training

In [33]:
# Sometimes it can be very useful to run a few batches on the CPU,
# because the tracing is much better than on the GPU and the error
# messages are more obvious; in our case, however, it takes A LOT of time.

# #On CPU
# with tf.device("/cpu:0"):
#     model_history = model.fit(dataset['train'], epochs=EPOCHS,
#                               callbacks=[saver],
#                               steps_per_epoch=STEPS_PER_EPOCH,
#                               validation_steps=VALIDATION_STEPS,
#                               validation_data=dataset['val'])

# On GPU
model_history = model.fit(dataset['train'], epochs=EPOCHS,
                          callbacks=[saver],
                          steps_per_epoch=STEPS_PER_EPOCH,
                          validation_steps=VALIDATION_STEPS,
                          validation_data=dataset['val'])
Epoch 1/250
 6/14 [===========>..................] - ETA: 8s - loss: 13.4810 - accuracy: 0.0053WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.4463s vs `on_train_batch_end` time: 0.4911s). Check your callbacks.
14/14 [==============================] - 59s 1s/step - loss: 10.8672 - accuracy: 0.0615 - val_loss: 2.8062 - val_accuracy: 0.3227
Epoch 2/250
14/14 [==============================] - 15s 1s/step - loss: 2.6117 - accuracy: 0.7411 - val_loss: 0.6061 - val_accuracy: 0.8381
Epoch 3/250
14/14 [==============================] - 16s 1s/step - loss: 0.8284 - accuracy: 0.8511 - val_loss: 0.4535 - val_accuracy: 0.8979
Epoch 4/250
14/14 [==============================] - 16s 1s/step - loss: 0.5688 - accuracy: 0.8854 - val_loss: 0.5185 - val_accuracy: 0.9452
Epoch 5/250
14/14 [==============================] - 16s 1s/step - loss: 0.5264 - accuracy: 0.9115 - val_loss: 0.5535 - val_accuracy: 0.9563
Epoch 6/250
14/14 [==============================] - 17s 1s/step - loss: 0.5116 - accuracy: 0.9209 - val_loss: 0.5689 - val_accuracy: 0.9589
Epoch 7/250
14/14 [==============================] - 16s 1s/step - loss: 0.4577 - accuracy: 0.9323 - val_loss: 0.6027 - val_accuracy: 0.9597
Epoch 8/250
14/14 [==============================] - 16s 1s/step - loss: 0.3958 - accuracy: 0.9423 - val_loss: 0.6272 - val_accuracy: 0.9597
Epoch 9/250
14/14 [==============================] - 16s 1s/step - loss: 0.3351 - accuracy: 0.9479 - val_loss: 0.6290 - val_accuracy: 0.9597
Epoch 10/250
14/14 [==============================] - 16s 1s/step - loss: 0.3074 - accuracy: 0.9491 - val_loss: 0.5369 - val_accuracy: 0.9600
Epoch 11/250
14/14 [==============================] - 16s 1s/step - loss: 0.3832 - accuracy: 0.9031 - val_loss: 0.5342 - val_accuracy: 0.9557
Epoch 12/250
14/14 [==============================] - 16s 1s/step - loss: 0.4145 - accuracy: 0.8850 - val_loss: 0.5334 - val_accuracy: 0.9539
Epoch 13/250
14/14 [==============================] - 16s 1s/step - loss: 0.3598 - accuracy: 0.8916 - val_loss: 0.5246 - val_accuracy: 0.9539
Epoch 14/250
14/14 [==============================] - 16s 1s/step - loss: 0.3224 - accuracy: 0.8980 - val_loss: 0.5190 - val_accuracy: 0.9539
Epoch 15/250
14/14 [==============================] - 16s 1s/step - loss: 0.2976 - accuracy: 0.9048 - val_loss: 0.5144 - val_accuracy: 0.9539
Epoch 16/250
14/14 [==============================] - 16s 1s/step - loss: 0.2759 - accuracy: 0.9119 - val_loss: 0.5076 - val_accuracy: 0.9539
Epoch 17/250
14/14 [==============================] - 16s 1s/step - loss: 0.2581 - accuracy: 0.9182 - val_loss: 0.5060 - val_accuracy: 0.9538
Epoch 18/250
14/14 [==============================] - 16s 1s/step - loss: 0.2497 - accuracy: 0.9227 - val_loss: 0.5030 - val_accuracy: 0.9538
Epoch 19/250
14/14 [==============================] - 16s 1s/step - loss: 0.2283 - accuracy: 0.9289 - val_loss: 0.5101 - val_accuracy: 0.9536
Epoch 20/250
14/14 [==============================] - 16s 1s/step - loss: 0.2170 - accuracy: 0.9341 - val_loss: 0.5170 - val_accuracy: 0.9531
Epoch 21/250
14/14 [==============================] - 16s 1s/step - loss: 0.2335 - accuracy: 0.9314 - val_loss: 0.5196 - val_accuracy: 0.9528
Epoch 22/250
14/14 [==============================] - 16s 1s/step - loss: 0.3080 - accuracy: 0.9248 - val_loss: 0.5089 - val_accuracy: 0.9526
Epoch 23/250
14/14 [==============================] - 16s 1s/step - loss: 0.2792 - accuracy: 0.9295 - val_loss: 0.5016 - val_accuracy: 0.9524
Epoch 24/250
14/14 [==============================] - 16s 1s/step - loss: 0.2538 - accuracy: 0.9352 - val_loss: 0.4962 - val_accuracy: 0.9521
Epoch 25/250
14/14 [==============================] - 16s 1s/step - loss: 0.2442 - accuracy: 0.9373 - val_loss: 0.4899 - val_accuracy: 0.9519
Epoch 26/250
14/14 [==============================] - 16s 1s/step - loss: 0.2346 - accuracy: 0.9399 - val_loss: 0.4600 - val_accuracy: 0.9517
Epoch 27/250
14/14 [==============================] - 16s 1s/step - loss: 0.2104 - accuracy: 0.9436 - val_loss: 0.4164 - val_accuracy: 0.9517
Epoch 28/250
14/14 [==============================] - 16s 1s/step - loss: 0.2015 - accuracy: 0.9446 - val_loss: 0.4401 - val_accuracy: 0.9560
Epoch 29/250
14/14 [==============================] - 16s 1s/step - loss: 0.2594 - accuracy: 0.9164 - val_loss: 0.4654 - val_accuracy: 0.9537
Epoch 30/250
14/14 [==============================] - 16s 1s/step - loss: 0.2712 - accuracy: 0.9064 - val_loss: 0.4496 - val_accuracy: 0.9529
Epoch 31/250
14/14 [==============================] - 16s 1s/step - loss: 0.2654 - accuracy: 0.9081 - val_loss: 0.4293 - val_accuracy: 0.9528
Epoch 32/250
14/14 [==============================] - 16s 1s/step - loss: 0.2562 - accuracy: 0.9133 - val_loss: 0.4059 - val_accuracy: 0.9528
Epoch 33/250
14/14 [==============================] - 16s 1s/step - loss: 0.2336 - accuracy: 0.9195 - val_loss: 0.3955 - val_accuracy: 0.9526
Epoch 34/250
14/14 [==============================] - 16s 1s/step - loss: 0.2175 - accuracy: 0.9245 - val_loss: 0.3850 - val_accuracy: 0.9525
Epoch 35/250
14/14 [==============================] - 16s 1s/step - loss: 0.2078 - accuracy: 0.9292 - val_loss: 0.3556 - val_accuracy: 0.9522
Epoch 36/250
14/14 [==============================] - 16s 1s/step - loss: 0.2044 - accuracy: 0.9307 - val_loss: 0.3497 - val_accuracy: 0.9522
Epoch 37/250
14/14 [==============================] - 16s 1s/step - loss: 0.1967 - accuracy: 0.9345 - val_loss: 0.2974 - val_accuracy: 0.9520
Epoch 38/250
14/14 [==============================] - 16s 1s/step - loss: 0.1882 - accuracy: 0.9314 - val_loss: 0.2546 - val_accuracy: 0.9486
Epoch 39/250
14/14 [==============================] - 16s 1s/step - loss: 0.1887 - accuracy: 0.9277 - val_loss: 0.2550 - val_accuracy: 0.9478
Epoch 40/250
14/14 [==============================] - 16s 1s/step - loss: 0.2254 - accuracy: 0.9231 - val_loss: 0.2557 - val_accuracy: 0.9477
Epoch 41/250
14/14 [==============================] - 16s 1s/step - loss: 0.2489 - accuracy: 0.9216 - val_loss: 0.2606 - val_accuracy: 0.9483
Epoch 42/250
14/14 [==============================] - 16s 1s/step - loss: 0.2346 - accuracy: 0.9283 - val_loss: 0.2592 - val_accuracy: 0.9486
Epoch 43/250
14/14 [==============================] - 16s 1s/step - loss: 0.2200 - accuracy: 0.9309 - val_loss: 0.2570 - val_accuracy: 0.9491
Epoch 44/250
14/14 [==============================] - 16s 1s/step - loss: 0.2025 - accuracy: 0.9353 - val_loss: 0.2592 - val_accuracy: 0.9498
Epoch 45/250
14/14 [==============================] - 16s 1s/step - loss: 0.1891 - accuracy: 0.9383 - val_loss: 0.2605 - val_accuracy: 0.9504
Epoch 46/250
14/14 [==============================] - 16s 1s/step - loss: 0.1807 - accuracy: 0.9415 - val_loss: 0.2429 - val_accuracy: 0.9505
Epoch 47/250
14/14 [==============================] - 16s 1s/step - loss: 0.1825 - accuracy: 0.9416 - val_loss: 0.2356 - val_accuracy: 0.9509
Epoch 48/250
14/14 [==============================] - 16s 1s/step - loss: 0.1916 - accuracy: 0.9406 - val_loss: 0.2402 - val_accuracy: 0.9514
Epoch 49/250
14/14 [==============================] - 16s 1s/step - loss: 0.2023 - accuracy: 0.9400 - val_loss: 0.2470 - val_accuracy: 0.9524
Epoch 50/250
14/14 [==============================] - 16s 1s/step - loss: 0.1925 - accuracy: 0.9440 - val_loss: 0.2266 - val_accuracy: 0.9517
Epoch 51/250
14/14 [==============================] - 16s 1s/step - loss: 0.1855 - accuracy: 0.9413 - val_loss: 0.2238 - val_accuracy: 0.9504
Epoch 52/250
14/14 [==============================] - 16s 1s/step - loss: 0.1769 - accuracy: 0.9420 - val_loss: 0.2210 - val_accuracy: 0.9503
Epoch 53/250
14/14 [==============================] - 16s 1s/step - loss: 0.1724 - accuracy: 0.9434 - val_loss: 0.2204 - val_accuracy: 0.9508
Epoch 54/250
14/14 [==============================] - 16s 1s/step - loss: 0.1653 - accuracy: 0.9457 - val_loss: 0.2226 - val_accuracy: 0.9516
Epoch 55/250
14/14 [==============================] - 16s 1s/step - loss: 0.1694 - accuracy: 0.9467 - val_loss: 0.2283 - val_accuracy: 0.9527
Epoch 56/250
14/14 [==============================] - 16s 1s/step - loss: 0.1542 - accuracy: 0.9512 - val_loss: 0.2185 - val_accuracy: 0.9530
Epoch 57/250
14/14 [==============================] - 16s 1s/step - loss: 0.1465 - accuracy: 0.9519 - val_loss: 0.2114 - val_accuracy: 0.9533
Epoch 58/250
14/14 [==============================] - 16s 1s/step - loss: 0.1614 - accuracy: 0.9462 - val_loss: 0.2138 - val_accuracy: 0.9536
Epoch 59/250
14/14 [==============================] - 16s 1s/step - loss: 0.2232 - accuracy: 0.9385 - val_loss: 0.2226 - val_accuracy: 0.9543
Epoch 60/250
14/14 [==============================] - 16s 1s/step - loss: 0.2031 - accuracy: 0.9413 - val_loss: 0.2261 - val_accuracy: 0.9548
Epoch 61/250
14/14 [==============================] - 16s 1s/step - loss: 0.1854 - accuracy: 0.9464 - val_loss: 0.2307 - val_accuracy: 0.9553
Epoch 62/250
14/14 [==============================] - 16s 1s/step - loss: 0.1782 - accuracy: 0.9484 - val_loss: 0.2150 - val_accuracy: 0.9557
Epoch 63/250
14/14 [==============================] - 16s 1s/step - loss: 0.1660 - accuracy: 0.9496 - val_loss: 0.2048 - val_accuracy: 0.9563
Epoch 64/250
14/14 [==============================] - 16s 1s/step - loss: 0.1540 - accuracy: 0.9525 - val_loss: 0.2110 - val_accuracy: 0.9567
Epoch 65/250
14/14 [==============================] - 16s 1s/step - loss: 0.1465 - accuracy: 0.9540 - val_loss: 0.2128 - val_accuracy: 0.9570
Epoch 66/250
14/14 [==============================] - 16s 1s/step - loss: 0.1552 - accuracy: 0.9528 - val_loss: 0.2106 - val_accuracy: 0.9571
Epoch 67/250
14/14 [==============================] - 16s 1s/step - loss: 0.1585 - accuracy: 0.9523 - val_loss: 0.1985 - val_accuracy: 0.9571
Epoch 68/250
14/14 [==============================] - 16s 1s/step - loss: 0.1623 - accuracy: 0.9527 - val_loss: 0.2007 - val_accuracy: 0.9574
Epoch 69/250
14/14 [==============================] - 16s 1s/step - loss: 0.1609 - accuracy: 0.9532 - val_loss: 0.1895 - val_accuracy: 0.9574
Epoch 70/250
14/14 [==============================] - 16s 1s/step - loss: 0.1493 - accuracy: 0.9550 - val_loss: 0.1921 - val_accuracy: 0.9577
Epoch 71/250
14/14 [==============================] - 16s 1s/step - loss: 0.1418 - accuracy: 0.9566 - val_loss: 0.1951 - val_accuracy: 0.9585
Epoch 72/250
14/14 [==============================] - 16s 1s/step - loss: 0.1376 - accuracy: 0.9585 - val_loss: 0.1949 - val_accuracy: 0.9588
Epoch 73/250
14/14 [==============================] - 16s 1s/step - loss: 0.1385 - accuracy: 0.9592 - val_loss: 0.1969 - val_accuracy: 0.9591
Epoch 74/250
14/14 [==============================] - 16s 1s/step - loss: 0.1340 - accuracy: 0.9611 - val_loss: 0.1782 - val_accuracy: 0.9593
Epoch 75/250
14/14 [==============================] - 16s 1s/step - loss: 0.1267 - accuracy: 0.9616 - val_loss: 0.1828 - val_accuracy: 0.9596
Epoch 76/250
14/14 [==============================] - 16s 1s/step - loss: 0.1259 - accuracy: 0.9596 - val_loss: 0.1878 - val_accuracy: 0.9592
Epoch 77/250
14/14 [==============================] - 16s 1s/step - loss: 0.1673 - accuracy: 0.9508 - val_loss: 0.1766 - val_accuracy: 0.9590
Epoch 78/250
14/14 [==============================] - 16s 1s/step - loss: 0.1861 - accuracy: 0.9473 - val_loss: 0.1682 - val_accuracy: 0.9595
Epoch 79/250
14/14 [==============================] - 16s 1s/step - loss: 0.1715 - accuracy: 0.9503 - val_loss: 0.1719 - val_accuracy: 0.9599
Epoch 80/250
14/14 [==============================] - 16s 1s/step - loss: 0.1622 - accuracy: 0.9541 - val_loss: 0.1779 - val_accuracy: 0.9602
Epoch 81/250
14/14 [==============================] - 16s 1s/step - loss: 0.1468 - accuracy: 0.9592 - val_loss: 0.1887 - val_accuracy: 0.9603
Epoch 82/250
14/14 [==============================] - 16s 1s/step - loss: 0.1357 - accuracy: 0.9626 - val_loss: 0.1936 - val_accuracy: 0.9604
Epoch 83/250
14/14 [==============================] - 16s 1s/step - loss: 0.1293 - accuracy: 0.9661 - val_loss: 0.1926 - val_accuracy: 0.9604
Epoch 84/250
14/14 [==============================] - 16s 1s/step - loss: 0.1306 - accuracy: 0.9660 - val_loss: 0.1873 - val_accuracy: 0.9606
Epoch 85/250
14/14 [==============================] - 16s 1s/step - loss: 0.1376 - accuracy: 0.9638 - val_loss: 0.1936 - val_accuracy: 0.9605
Epoch 86/250
14/14 [==============================] - 16s 1s/step - loss: 0.1497 - accuracy: 0.9630 - val_loss: 0.2062 - val_accuracy: 0.9604
Epoch 87/250
14/14 [==============================] - 16s 1s/step - loss: 0.1409 - accuracy: 0.9664 - val_loss: 0.2149 - val_accuracy: 0.9604
Epoch 88/250
14/14 [==============================] - 16s 1s/step - loss: 0.1364 - accuracy: 0.9668 - val_loss: 0.2111 - val_accuracy: 0.9606
Epoch 89/250
14/14 [==============================] - 16s 1s/step - loss: 0.1283 - accuracy: 0.9683 - val_loss: 0.1913 - val_accuracy: 0.9609
Epoch 90/250
14/14 [==============================] - 16s 1s/step - loss: 0.1236 - accuracy: 0.9690 - val_loss: 0.1881 - val_accuracy: 0.9610
Epoch 91/250
14/14 [==============================] - 16s 1s/step - loss: 0.1180 - accuracy: 0.9704 - val_loss: 0.1875 - val_accuracy: 0.9610
Epoch 92/250
14/14 [==============================] - 16s 1s/step - loss: 0.1243 - accuracy: 0.9705 - val_loss: 0.1889 - val_accuracy: 0.9610
Epoch 93/250
14/14 [==============================] - 16s 1s/step - loss: 0.1118 - accuracy: 0.9739 - val_loss: 0.1860 - val_accuracy: 0.9611
Epoch 94/250
14/14 [==============================] - 16s 1s/step - loss: 0.1023 - accuracy: 0.9762 - val_loss: 0.1853 - val_accuracy: 0.9612
Epoch 95/250
14/14 [==============================] - 16s 1s/step - loss: 0.1241 - accuracy: 0.9691 - val_loss: 0.1909 - val_accuracy: 0.9611
Epoch 96/250
14/14 [==============================] - 16s 1s/step - loss: 0.2010 - accuracy: 0.9566 - val_loss: 0.1464 - val_accuracy: 0.9578
Epoch 97/250
14/14 [==============================] - 16s 1s/step - loss: 0.1727 - accuracy: 0.9529 - val_loss: 0.1491 - val_accuracy: 0.9554
Epoch 98/250
14/14 [==============================] - 16s 1s/step - loss: 0.1579 - accuracy: 0.9544 - val_loss: 0.1480 - val_accuracy: 0.9568
Epoch 99/250
14/14 [==============================] - 16s 1s/step - loss: 0.1494 - accuracy: 0.9551 - val_loss: 0.1481 - val_accuracy: 0.9582
Epoch 100/250
14/14 [==============================] - 16s 1s/step - loss: 0.1436 - accuracy: 0.9589 - val_loss: 0.1505 - val_accuracy: 0.9596
Epoch 101/250
14/14 [==============================] - 16s 1s/step - loss: 0.1298 - accuracy: 0.9643 - val_loss: 0.1573 - val_accuracy: 0.9604
Epoch 102/250
14/14 [==============================] - 16s 1s/step - loss: 0.1225 - accuracy: 0.9677 - val_loss: 0.1584 - val_accuracy: 0.9607
Epoch 103/250
14/14 [==============================] - 16s 1s/step - loss: 0.1300 - accuracy: 0.9657 - val_loss: 0.1460 - val_accuracy: 0.9615
Epoch 104/250
14/14 [==============================] - 16s 1s/step - loss: 0.1374 - accuracy: 0.9550 - val_loss: 0.1535 - val_accuracy: 0.9497
Epoch 105/250
14/14 [==============================] - 16s 1s/step - loss: 0.1513 - accuracy: 0.9445 - val_loss: 0.1598 - val_accuracy: 0.9498
Epoch 106/250
14/14 [==============================] - 16s 1s/step - loss: 0.1523 - accuracy: 0.9409 - val_loss: 0.1652 - val_accuracy: 0.9435
Epoch 107/250
14/14 [==============================] - 16s 1s/step - loss: 0.1456 - accuracy: 0.9358 - val_loss: 0.1700 - val_accuracy: 0.9452
Epoch 108/250
14/14 [==============================] - 16s 1s/step - loss: 0.1393 - accuracy: 0.9381 - val_loss: 0.1634 - val_accuracy: 0.9460
Epoch 109/250
14/14 [==============================] - 16s 1s/step - loss: 0.1358 - accuracy: 0.9380 - val_loss: 0.1577 - val_accuracy: 0.9458
Epoch 110/250
14/14 [==============================] - 16s 1s/step - loss: 0.1374 - accuracy: 0.9384 - val_loss: 0.1576 - val_accuracy: 0.9501
Epoch 111/250
14/14 [==============================] - 16s 1s/step - loss: 0.1320 - accuracy: 0.9428 - val_loss: 0.1556 - val_accuracy: 0.9530
Epoch 112/250
14/14 [==============================] - 16s 1s/step - loss: 0.1237 - accuracy: 0.9460 - val_loss: 0.1594 - val_accuracy: 0.9556
Epoch 113/250
14/14 [==============================] - 16s 1s/step - loss: 0.1194 - accuracy: 0.9511 - val_loss: 0.1668 - val_accuracy: 0.9577
Epoch 114/250
14/14 [==============================] - 16s 1s/step - loss: 0.1572 - accuracy: 0.9502 - val_loss: 0.1732 - val_accuracy: 0.9588
Epoch 115/250
14/14 [==============================] - 16s 1s/step - loss: 0.1807 - accuracy: 0.9492 - val_loss: 0.1828 - val_accuracy: 0.9596
Epoch 116/250
14/14 [==============================] - 16s 1s/step - loss: 0.1672 - accuracy: 0.9530 - val_loss: 0.1862 - val_accuracy: 0.9602
Epoch 117/250
14/14 [==============================] - 16s 1s/step - loss: 0.1540 - accuracy: 0.9547 - val_loss: 0.1932 - val_accuracy: 0.9607
Epoch 118/250
14/14 [==============================] - 16s 1s/step - loss: 0.1372 - accuracy: 0.9595 - val_loss: 0.2042 - val_accuracy: 0.9610
Epoch 119/250
14/14 [==============================] - 16s 1s/step - loss: 0.1272 - accuracy: 0.9629 - val_loss: 0.2428 - val_accuracy: 0.9613
Epoch 120/250
14/14 [==============================] - 16s 1s/step - loss: 0.1342 - accuracy: 0.9681 - val_loss: 0.2482 - val_accuracy: 0.9622
Epoch 121/250
14/14 [==============================] - 16s 1s/step - loss: 0.1522 - accuracy: 0.9678 - val_loss: 0.2283 - val_accuracy: 0.9617
Epoch 122/250
14/14 [==============================] - 16s 1s/step - loss: 0.1623 - accuracy: 0.9630 - val_loss: 0.2099 - val_accuracy: 0.9622
Epoch 123/250
14/14 [==============================] - 16s 1s/step - loss: 0.1659 - accuracy: 0.9621 - val_loss: 0.2027 - val_accuracy: 0.9622
Epoch 124/250
14/14 [==============================] - 16s 1s/step - loss: 0.1446 - accuracy: 0.9652 - val_loss: 0.1822 - val_accuracy: 0.9623
Epoch 125/250
14/14 [==============================] - 16s 1s/step - loss: 0.1371 - accuracy: 0.9656 - val_loss: 0.1837 - val_accuracy: 0.9622
Epoch 126/250
14/14 [==============================] - 16s 1s/step - loss: 0.1280 - accuracy: 0.9682 - val_loss: 0.1772 - val_accuracy: 0.9617
Epoch 127/250
14/14 [==============================] - 16s 1s/step - loss: 0.1237 - accuracy: 0.9698 - val_loss: 0.1725 - val_accuracy: 0.9614
Epoch 128/250
14/14 [==============================] - 16s 1s/step - loss: 0.1172 - accuracy: 0.9710 - val_loss: 0.1804 - val_accuracy: 0.9612
Epoch 129/250
14/14 [==============================] - 16s 1s/step - loss: 0.1241 - accuracy: 0.9710 - val_loss: 0.1923 - val_accuracy: 0.9611
Epoch 130/250
14/14 [==============================] - 16s 1s/step - loss: 0.1087 - accuracy: 0.9748 - val_loss: 0.1865 - val_accuracy: 0.9613
Epoch 131/250
14/14 [==============================] - 16s 1s/step - loss: 0.1012 - accuracy: 0.9755 - val_loss: 0.2086 - val_accuracy: 0.9613
Epoch 132/250
14/14 [==============================] - 16s 1s/step - loss: 0.1312 - accuracy: 0.9651 - val_loss: 0.3090 - val_accuracy: 0.9587
Epoch 133/250
14/14 [==============================] - 16s 1s/step - loss: 0.2263 - accuracy: 0.9447 - val_loss: 0.2858 - val_accuracy: 0.9579
Epoch 134/250
14/14 [==============================] - 16s 1s/step - loss: 0.1949 - accuracy: 0.9464 - val_loss: 0.2633 - val_accuracy: 0.9582
Epoch 135/250
14/14 [==============================] - 16s 1s/step - loss: 0.1681 - accuracy: 0.9522 - val_loss: 0.2585 - val_accuracy: 0.9585
Epoch 136/250
14/14 [==============================] - 16s 1s/step - loss: 0.1620 - accuracy: 0.9549 - val_loss: 0.2306 - val_accuracy: 0.9587
Epoch 137/250
14/14 [==============================] - 16s 1s/step - loss: 0.1508 - accuracy: 0.9583 - val_loss: 0.2073 - val_accuracy: 0.9590
Epoch 138/250
14/14 [==============================] - 16s 1s/step - loss: 0.1342 - accuracy: 0.9630 - val_loss: 0.1897 - val_accuracy: 0.9592
Epoch 139/250
14/14 [==============================] - 16s 1s/step - loss: 0.1234 - accuracy: 0.9661 - val_loss: 0.1859 - val_accuracy: 0.9600
Epoch 140/250
14/14 [==============================] - 16s 1s/step - loss: 0.1303 - accuracy: 0.9634 - val_loss: 0.1981 - val_accuracy: 0.9603
Epoch 141/250
14/14 [==============================] - 16s 1s/step - loss: 0.1347 - accuracy: 0.9642 - val_loss: 0.2118 - val_accuracy: 0.9603
Epoch 142/250
14/14 [==============================] - 16s 1s/step - loss: 0.1379 - accuracy: 0.9656 - val_loss: 0.2064 - val_accuracy: 0.9604
Epoch 143/250
14/14 [==============================] - 16s 1s/step - loss: 0.1335 - accuracy: 0.9676 - val_loss: 0.1844 - val_accuracy: 0.9607
Epoch 144/250
14/14 [==============================] - 16s 1s/step - loss: 0.1207 - accuracy: 0.9686 - val_loss: 0.1756 - val_accuracy: 0.9608
Epoch 145/250
14/14 [==============================] - 16s 1s/step - loss: 0.1148 - accuracy: 0.9700 - val_loss: 0.1858 - val_accuracy: 0.9608
Epoch 146/250
14/14 [==============================] - 16s 1s/step - loss: 0.1083 - accuracy: 0.9734 - val_loss: 0.1923 - val_accuracy: 0.9609
Epoch 147/250
14/14 [==============================] - 16s 1s/step - loss: 0.1078 - accuracy: 0.9748 - val_loss: 0.1665 - val_accuracy: 0.9613
Epoch 148/250
14/14 [==============================] - 16s 1s/step - loss: 0.1064 - accuracy: 0.9751 - val_loss: 0.1478 - val_accuracy: 0.9620
Epoch 149/250
14/14 [==============================] - 16s 1s/step - loss: 0.1012 - accuracy: 0.9761 - val_loss: 0.1569 - val_accuracy: 0.9619
Epoch 150/250
14/14 [==============================] - 16s 1s/step - loss: 0.0975 - accuracy: 0.9750 - val_loss: 0.1826 - val_accuracy: 0.9614
Epoch 151/250
14/14 [==============================] - 16s 1s/step - loss: 0.1464 - accuracy: 0.9663 - val_loss: 0.1941 - val_accuracy: 0.9612
Epoch 152/250
14/14 [==============================] - 16s 1s/step - loss: 0.1779 - accuracy: 0.9615 - val_loss: 0.2068 - val_accuracy: 0.9610
Epoch 153/250
14/14 [==============================] - 16s 1s/step - loss: 0.1659 - accuracy: 0.9670 - val_loss: 0.1937 - val_accuracy: 0.9611
Epoch 154/250
14/14 [==============================] - 16s 1s/step - loss: 0.1472 - accuracy: 0.9687 - val_loss: 0.1839 - val_accuracy: 0.9612
Epoch 155/250
14/14 [==============================] - 16s 1s/step - loss: 0.1301 - accuracy: 0.9696 - val_loss: 0.1503 - val_accuracy: 0.9589
Epoch 156/250
14/14 [==============================] - 16s 1s/step - loss: 0.1159 - accuracy: 0.9664 - val_loss: 0.1535 - val_accuracy: 0.9574
Epoch 157/250
14/14 [==============================] - 16s 1s/step - loss: 0.1114 - accuracy: 0.9677 - val_loss: 0.1657 - val_accuracy: 0.9578
Epoch 158/250
14/14 [==============================] - 16s 1s/step - loss: 0.1157 - accuracy: 0.9679 - val_loss: 0.1716 - val_accuracy: 0.9585
Epoch 159/250
14/14 [==============================] - 16s 1s/step - loss: 0.1172 - accuracy: 0.9676 - val_loss: 0.1711 - val_accuracy: 0.9592
Epoch 160/250
14/14 [==============================] - 16s 1s/step - loss: 0.1254 - accuracy: 0.9665 - val_loss: 0.1730 - val_accuracy: 0.9597
Epoch 161/250
14/14 [==============================] - 16s 1s/step - loss: 0.1158 - accuracy: 0.9700 - val_loss: 0.1695 - val_accuracy: 0.9600
Epoch 162/250
14/14 [==============================] - 16s 1s/step - loss: 0.1132 - accuracy: 0.9708 - val_loss: 0.1597 - val_accuracy: 0.9604
Epoch 163/250
14/14 [==============================] - 16s 1s/step - loss: 0.1213 - accuracy: 0.9683 - val_loss: 0.1979 - val_accuracy: 0.9223
Epoch 164/250
14/14 [==============================] - 16s 1s/step - loss: 0.1974 - accuracy: 0.9456 - val_loss: 0.2165 - val_accuracy: 0.8993
Epoch 165/250
14/14 [==============================] - 16s 1s/step - loss: 0.1910 - accuracy: 0.9383 - val_loss: 0.2084 - val_accuracy: 0.9022
Epoch 166/250
14/14 [==============================] - 16s 1s/step - loss: 0.1753 - accuracy: 0.9416 - val_loss: 0.1918 - val_accuracy: 0.9126
Epoch 167/250
14/14 [==============================] - 16s 1s/step - loss: 0.1523 - accuracy: 0.9427 - val_loss: 0.1806 - val_accuracy: 0.9227
Epoch 168/250
14/14 [==============================] - 16s 1s/step - loss: 0.1408 - accuracy: 0.9459 - val_loss: 0.1759 - val_accuracy: 0.9306
Epoch 169/250
14/14 [==============================] - 16s 1s/step - loss: 0.1459 - accuracy: 0.9486 - val_loss: 0.1730 - val_accuracy: 0.9341
Epoch 170/250
14/14 [==============================] - 16s 1s/step - loss: 0.1817 - accuracy: 0.9466 - val_loss: 0.1702 - val_accuracy: 0.9382
Epoch 171/250
14/14 [==============================] - 16s 1s/step - loss: 0.1675 - accuracy: 0.9507 - val_loss: 0.1672 - val_accuracy: 0.9427
Epoch 172/250
14/14 [==============================] - 16s 1s/step - loss: 0.1522 - accuracy: 0.9564 - val_loss: 0.1651 - val_accuracy: 0.9459
Epoch 173/250
14/14 [==============================] - 16s 1s/step - loss: 0.1483 - accuracy: 0.9586 - val_loss: 0.1628 - val_accuracy: 0.9485
Epoch 174/250
14/14 [==============================] - 16s 1s/step - loss: 0.1427 - accuracy: 0.9612 - val_loss: 0.1608 - val_accuracy: 0.9508
Epoch 175/250
14/14 [==============================] - 16s 1s/step - loss: 0.1299 - accuracy: 0.9652 - val_loss: 0.1570 - val_accuracy: 0.9513
Epoch 176/250
14/14 [==============================] - 16s 1s/step - loss: 0.1208 - accuracy: 0.9678 - val_loss: 0.1575 - val_accuracy: 0.9536
Epoch 177/250
14/14 [==============================] - 16s 1s/step - loss: 0.1273 - accuracy: 0.9674 - val_loss: 0.1526 - val_accuracy: 0.9535
Epoch 178/250
14/14 [==============================] - 16s 1s/step - loss: 0.1258 - accuracy: 0.9665 - val_loss: 0.1507 - val_accuracy: 0.9479
Epoch 179/250
14/14 [==============================] - 16s 1s/step - loss: 0.1301 - accuracy: 0.9649 - val_loss: 0.1481 - val_accuracy: 0.9511
Epoch 180/250
14/14 [==============================] - 16s 1s/step - loss: 0.1260 - accuracy: 0.9671 - val_loss: 0.1466 - val_accuracy: 0.9543
Epoch 181/250
14/14 [==============================] - 16s 1s/step - loss: 0.1185 - accuracy: 0.9690 - val_loss: 0.1485 - val_accuracy: 0.9552
Epoch 182/250
14/14 [==============================] - 16s 1s/step - loss: 0.1126 - accuracy: 0.9707 - val_loss: 0.1506 - val_accuracy: 0.9561
Epoch 183/250
14/14 [==============================] - 16s 1s/step - loss: 0.1053 - accuracy: 0.9737 - val_loss: 0.1470 - val_accuracy: 0.9553
Epoch 184/250
14/14 [==============================] - 16s 1s/step - loss: 0.1046 - accuracy: 0.9742 - val_loss: 0.1474 - val_accuracy: 0.9557
Epoch 185/250
14/14 [==============================] - 16s 1s/step - loss: 0.1005 - accuracy: 0.9760 - val_loss: 0.1506 - val_accuracy: 0.9569
Epoch 186/250
14/14 [==============================] - 16s 1s/step - loss: 0.0929 - accuracy: 0.9780 - val_loss: 0.1523 - val_accuracy: 0.9578
Epoch 187/250
14/14 [==============================] - 16s 1s/step - loss: 0.0955 - accuracy: 0.9757 - val_loss: 0.1551 - val_accuracy: 0.9587
Epoch 188/250
14/14 [==============================] - 16s 1s/step - loss: 0.1475 - accuracy: 0.9672 - val_loss: 0.1449 - val_accuracy: 0.9587
Epoch 189/250
14/14 [==============================] - 16s 1s/step - loss: 0.1703 - accuracy: 0.9633 - val_loss: 0.1450 - val_accuracy: 0.9595
Epoch 190/250
14/14 [==============================] - 16s 1s/step - loss: 0.1561 - accuracy: 0.9669 - val_loss: 0.1700 - val_accuracy: 0.9296
Epoch 191/250
14/14 [==============================] - 16s 1s/step - loss: 0.1489 - accuracy: 0.9545 - val_loss: 0.1811 - val_accuracy: 0.9153
Epoch 192/250
14/14 [==============================] - 16s 1s/step - loss: 0.1408 - accuracy: 0.9534 - val_loss: 0.1675 - val_accuracy: 0.9317
Epoch 193/250
14/14 [==============================] - 16s 1s/step - loss: 0.1293 - accuracy: 0.9591 - val_loss: 0.1523 - val_accuracy: 0.9475
Epoch 194/250
14/14 [==============================] - 16s 1s/step - loss: 0.1229 - accuracy: 0.9632 - val_loss: 0.1447 - val_accuracy: 0.9534
Epoch 195/250
14/14 [==============================] - 16s 1s/step - loss: 0.1229 - accuracy: 0.9665 - val_loss: 0.1394 - val_accuracy: 0.9586
Epoch 196/250
14/14 [==============================] - 16s 1s/step - loss: 0.1264 - accuracy: 0.9671 - val_loss: 0.1373 - val_accuracy: 0.9613
Epoch 197/250
14/14 [==============================] - 16s 1s/step - loss: 0.1352 - accuracy: 0.9671 - val_loss: 0.1411 - val_accuracy: 0.9622
Epoch 198/250
14/14 [==============================] - 16s 1s/step - loss: 0.1227 - accuracy: 0.9712 - val_loss: 0.1465 - val_accuracy: 0.9623
Epoch 199/250
14/14 [==============================] - 16s 1s/step - loss: 0.1188 - accuracy: 0.9730 - val_loss: 0.1539 - val_accuracy: 0.9623
Epoch 200/250
14/14 [==============================] - 16s 1s/step - loss: 0.1113 - accuracy: 0.9744 - val_loss: 0.1567 - val_accuracy: 0.9623
Epoch 201/250
14/14 [==============================] - 16s 1s/step - loss: 0.1063 - accuracy: 0.9758 - val_loss: 0.1570 - val_accuracy: 0.9622
Epoch 202/250
14/14 [==============================] - 16s 1s/step - loss: 0.0993 - accuracy: 0.9778 - val_loss: 0.1707 - val_accuracy: 0.9621
Epoch 203/250
14/14 [==============================] - 16s 1s/step - loss: 0.1021 - accuracy: 0.9783 - val_loss: 0.2423 - val_accuracy: 0.9613
Epoch 204/250
14/14 [==============================] - 16s 1s/step - loss: 0.1084 - accuracy: 0.9706 - val_loss: 0.3699 - val_accuracy: 0.9535
Epoch 205/250
14/14 [==============================] - 16s 1s/step - loss: 0.1260 - accuracy: 0.9530 - val_loss: 0.3157 - val_accuracy: 0.9504
Epoch 206/250
14/14 [==============================] - 16s 1s/step - loss: 0.1568 - accuracy: 0.9467 - val_loss: 0.2442 - val_accuracy: 0.9481
Epoch 207/250
14/14 [==============================] - 16s 1s/step - loss: 0.1846 - accuracy: 0.9431 - val_loss: 0.2108 - val_accuracy: 0.9470
Epoch 208/250
14/14 [==============================] - 16s 1s/step - loss: 0.1600 - accuracy: 0.9484 - val_loss: 0.2013 - val_accuracy: 0.9485
Epoch 209/250
14/14 [==============================] - 16s 1s/step - loss: 0.1425 - accuracy: 0.9541 - val_loss: 0.1956 - val_accuracy: 0.9504
Epoch 210/250
14/14 [==============================] - 16s 1s/step - loss: 0.1377 - accuracy: 0.9570 - val_loss: 0.1898 - val_accuracy: 0.9518
Epoch 211/250
14/14 [==============================] - 16s 1s/step - loss: 0.1332 - accuracy: 0.9593 - val_loss: 0.1606 - val_accuracy: 0.9486
Epoch 212/250
14/14 [==============================] - 16s 1s/step - loss: 0.1166 - accuracy: 0.9610 - val_loss: 0.1522 - val_accuracy: 0.9482
Epoch 213/250
14/14 [==============================] - 16s 1s/step - loss: 0.1101 - accuracy: 0.9624 - val_loss: 0.1516 - val_accuracy: 0.9498
Epoch 214/250
14/14 [==============================] - 16s 1s/step - loss: 0.1143 - accuracy: 0.9625 - val_loss: 0.1516 - val_accuracy: 0.9516
Epoch 215/250
14/14 [==============================] - 16s 1s/step - loss: 0.1185 - accuracy: 0.9637 - val_loss: 0.1501 - val_accuracy: 0.9521
Epoch 216/250
14/14 [==============================] - 16s 1s/step - loss: 0.1222 - accuracy: 0.9635 - val_loss: 0.1523 - val_accuracy: 0.9531
Epoch 217/250
14/14 [==============================] - 16s 1s/step - loss: 0.1176 - accuracy: 0.9654 - val_loss: 0.1538 - val_accuracy: 0.9541
Epoch 218/250
14/14 [==============================] - 16s 1s/step - loss: 0.1091 - accuracy: 0.9670 - val_loss: 0.1511 - val_accuracy: 0.9544
Epoch 219/250
14/14 [==============================] - 16s 1s/step - loss: 0.1040 - accuracy: 0.9687 - val_loss: 0.1486 - val_accuracy: 0.9547
Epoch 220/250
14/14 [==============================] - 16s 1s/step - loss: 0.0959 - accuracy: 0.9713 - val_loss: 0.1454 - val_accuracy: 0.9541
Epoch 221/250
14/14 [==============================] - 16s 1s/step - loss: 0.0949 - accuracy: 0.9721 - val_loss: 0.1441 - val_accuracy: 0.9543
Epoch 222/250
14/14 [==============================] - 16s 1s/step - loss: 0.0922 - accuracy: 0.9737 - val_loss: 0.1418 - val_accuracy: 0.9546
Epoch 223/250
14/14 [==============================] - 16s 1s/step - loss: 0.0840 - accuracy: 0.9756 - val_loss: 0.1395 - val_accuracy: 0.9542
Epoch 224/250
14/14 [==============================] - 16s 1s/step - loss: 0.0859 - accuracy: 0.9730 - val_loss: 0.1395 - val_accuracy: 0.9504
Epoch 225/250
14/14 [==============================] - 16s 1s/step - loss: 0.1269 - accuracy: 0.9648 - val_loss: 0.1425 - val_accuracy: 0.9512
Epoch 226/250
14/14 [==============================] - 16s 1s/step - loss: 0.1477 - accuracy: 0.9606 - val_loss: 0.1403 - val_accuracy: 0.9448
Epoch 227/250
14/14 [==============================] - 16s 1s/step - loss: 0.1315 - accuracy: 0.9635 - val_loss: 0.1415 - val_accuracy: 0.9510
Epoch 228/250
14/14 [==============================] - 16s 1s/step - loss: 0.1223 - accuracy: 0.9667 - val_loss: 0.1489 - val_accuracy: 0.9543
Epoch 229/250
14/14 [==============================] - 16s 1s/step - loss: 0.1117 - accuracy: 0.9693 - val_loss: 0.1527 - val_accuracy: 0.9558
Epoch 230/250
14/14 [==============================] - 16s 1s/step - loss: 0.1093 - accuracy: 0.9703 - val_loss: 0.1476 - val_accuracy: 0.9557
Epoch 231/250
14/14 [==============================] - 16s 1s/step - loss: 0.0978 - accuracy: 0.9727 - val_loss: 0.1351 - val_accuracy: 0.9547
Epoch 232/250
14/14 [==============================] - 16s 1s/step - loss: 0.0970 - accuracy: 0.9726 - val_loss: 0.1331 - val_accuracy: 0.9549
Epoch 233/250
14/14 [==============================] - 16s 1s/step - loss: 0.0995 - accuracy: 0.9716 - val_loss: 0.1323 - val_accuracy: 0.9558
Epoch 234/250
14/14 [==============================] - 16s 1s/step - loss: 0.1120 - accuracy: 0.9696 - val_loss: 0.1371 - val_accuracy: 0.9563
Epoch 235/250
14/14 [==============================] - 16s 1s/step - loss: 0.1023 - accuracy: 0.9720 - val_loss: 0.1330 - val_accuracy: 0.9555
Epoch 236/250
14/14 [==============================] - 16s 1s/step - loss: 0.0965 - accuracy: 0.9722 - val_loss: 0.1385 - val_accuracy: 0.9486
Epoch 237/250
14/14 [==============================] - 16s 1s/step - loss: 0.0962 - accuracy: 0.9689 - val_loss: 0.1761 - val_accuracy: 0.9131
Epoch 238/250
14/14 [==============================] - 16s 1s/step - loss: 0.1283 - accuracy: 0.9431 - val_loss: 0.2964 - val_accuracy: 0.9120
Epoch 239/250
14/14 [==============================] - 16s 1s/step - loss: 0.2239 - accuracy: 0.9123 - val_loss: 0.3699 - val_accuracy: 0.9168
Epoch 240/250
14/14 [==============================] - 16s 1s/step - loss: 0.2136 - accuracy: 0.9060 - val_loss: 0.2454 - val_accuracy: 0.9111
Epoch 241/250
14/14 [==============================] - 16s 1s/step - loss: 0.1519 - accuracy: 0.9269 - val_loss: 0.1921 - val_accuracy: 0.9045
Epoch 242/250
14/14 [==============================] - 16s 1s/step - loss: 0.1238 - accuracy: 0.9377 - val_loss: 0.1796 - val_accuracy: 0.9037
Epoch 243/250
14/14 [==============================] - 16s 1s/step - loss: 0.1302 - accuracy: 0.9409 - val_loss: 0.1721 - val_accuracy: 0.9111
Epoch 244/250
14/14 [==============================] - 16s 1s/step - loss: 0.1675 - accuracy: 0.9412 - val_loss: 0.1657 - val_accuracy: 0.9217
Epoch 245/250
14/14 [==============================] - 16s 1s/step - loss: 0.1525 - accuracy: 0.9464 - val_loss: 0.1595 - val_accuracy: 0.9295
Epoch 246/250
14/14 [==============================] - 16s 1s/step - loss: 0.1361 - accuracy: 0.9535 - val_loss: 0.1563 - val_accuracy: 0.9356
Epoch 247/250
14/14 [==============================] - 16s 1s/step - loss: 0.1319 - accuracy: 0.9575 - val_loss: 0.1544 - val_accuracy: 0.9410
Epoch 248/250
14/14 [==============================] - 16s 1s/step - loss: 0.1290 - accuracy: 0.9610 - val_loss: 0.1530 - val_accuracy: 0.9451
Epoch 249/250
14/14 [==============================] - 16s 1s/step - loss: 0.1163 - accuracy: 0.9651 - val_loss: 0.1454 - val_accuracy: 0.9452
Epoch 250/250
14/14 [==============================] - 16s 1s/step - loss: 0.1070 - accuracy: 0.9672 - val_loss: 0.1411 - val_accuracy: 0.9460
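
Note that the validation loss above oscillates and bottoms out well before epoch 250 (val_loss ≈ 0.13 around epoch 233), so the final weights are not necessarily the best ones. A minimal sketch of guarding against that with the EarlyStopping callback already imported above; the patience value is an illustrative assumption, and `saver` is the checkpoint callback used in the run above:

# Sketch only, not part of the run above: stop once val_loss stalls and
# roll back to the best weights seen so far.
early_stop = EarlyStopping(monitor='val_loss', patience=20,
                           restore_best_weights=True)
# model_history = model.fit(dataset['train'], epochs=EPOCHS,
#                           callbacks=[saver, early_stop],
#                           steps_per_epoch=STEPS_PER_EPOCH,
#                           validation_steps=VALIDATION_STEPS,
#                           validation_data=dataset['val'])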

Manually Save Model

In [34]:
# Save the model at the latest epoch, or at any epoch desired
# model.save(f"./saved_models/model_{EPOCHS}.h5")
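
model.save writes the full model (architecture, weights, and optimizer state) to a single HDF5 file. When only the weights matter, a weights-only save is a lighter alternative; this is a sketch and the filename is illustrative:

# model.save_weights(f"./saved_models/weights_{EPOCHS}.h5")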

Load a Saved Model

In [35]:
MODEL_PATH = "./saved_models/model_20.h5" # change this to the model path you want to load

model.load_weights(MODEL_PATH, by_name=True)
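
load_weights(..., by_name=True) copies saved weights into matching layers of the already-built model, so the architecture in memory must match the checkpoint. One alternative, assuming the file was written with model.save, is to rebuild everything from the file without defining the model first:

# Alternative sketch, assuming MODEL_PATH was written by model.save():
# rebuilds architecture, weights, and optimizer state directly from the file.
# model = tf.keras.models.load_model(MODEL_PATH)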

Run Prediction

In [36]:
import sys
np.set_printoptions(threshold=sys.maxsize)
In [37]:
# Take one batch of images and masks from the validation set
for image, mask in dataset['val'].take(1):
    sample_image, sample_mask = image, mask
In [38]:
# Predict on the batch, then pick one sample by index
sample_idx = 3
pred_mask = model.predict(sample_image)

sample_image = sample_image.numpy()[sample_idx] # pick one image from the batch
sample_image = (sample_image*255.0).astype(np.uint32)

# Ground-truth mask
sample_mask = sample_mask.numpy()[sample_idx]
sample_mask = np.squeeze(sample_mask, axis=-1)

# Predicted mask
pred_mask = pred_mask[sample_idx] # pick the matching mask from the batch
In [39]:
print(sample_image.shape)
print(pred_mask.shape)
(256, 256, 3)
(256, 256, 256)
In [40]:
pred_mask = np.argmax(pred_mask, axis=-1) # collapse class scores to per-pixel labels
#pred_mask = np.expand_dims(pred_mask, axis=-1)
print(sample_image.shape)
print(pred_mask.shape)
(256, 256, 3)
(256, 256)
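
The model's output carries one channel of class scores per pixel; np.argmax over the last axis keeps, for each pixel, the index of the highest-scoring channel, collapsing (H, W, C) scores into an (H, W) label map. A minimal sketch of that step as a reusable helper (the function name is illustrative):

def logits_to_label_map(pred):
    # Collapse (H, W, C) per-pixel class scores to an (H, W) integer label map
    return np.argmax(pred, axis=-1)

# e.g. label_map = logits_to_label_map(model.predict(sample_image)[sample_idx])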
In [41]:
#print(pred_mask)

Visualize Prediction

In [42]:
# Overlay the predicted mask on the image using a random color
colors = visualize.random_colors(len(img_meta_obj.annotations))
color = colors[0]
print(color)

masked_image = sample_image.astype(np.uint32).copy()
masked_image = visualize.apply_mask(masked_image, pred_mask, color)

visualize.display_images([masked_image])
(0.8648648648648649, 0.0, 1.0)
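
apply_mask itself is defined in visualize.py and is not shown here. Assuming it follows the common convention of alpha-blending the color into pixels where the mask is nonzero, a minimal equivalent looks like this (a sketch, not the project's actual implementation):

def overlay_mask(image, mask, color, alpha=0.5):
    # Blend `color` (RGB floats in [0, 1]) into pixels where mask != 0
    out = image.copy()
    for c in range(3):
        out[:, :, c] = np.where(mask != 0,
                                out[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
                                out[:, :, c])
    return out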
In [43]:
# Visualize Ground Truth
In [44]:
# Overlay the ground-truth mask on the same image
colors = visualize.random_colors(len(img_meta_obj.annotations))
color = colors[0]
print(color)

masked_image = sample_image.astype(np.uint32).copy()

print(masked_image.shape)
print(sample_mask.shape)

masked_image = visualize.apply_mask(masked_image, sample_mask, color)

visualize.display_images([masked_image])
(0.0, 0.9189189189189193, 1.0)
(256, 256, 3)
(256, 256)
In [45]:
import sys
np.set_printoptions(threshold=sys.maxsize)
sample_idx=0
#Pick first batch of Image from Validation set
for image, mask in dataset['val'].take(5):
    sample_image, sample_mask = image, mask
  #Pick first Prediction Validation set
#for sample_idx in range(1):
    
    pred_mask = model.predict(sample_image)

    sample_image = sample_image.numpy()[sample_idx] #Pick first image form a batch of 5
    sample_image = (sample_image*255.0).astype(np.uint32)

    #Ground Truth Mask
    sample_mask = sample_mask.numpy()[sample_idx]
    sample_mask = np.squeeze(sample_mask, axis =-1)

    #Predicted Mask
    pred_mask = pred_mask[sample_idx] #Pick first mask form a batch of 5
    pred_mask_IoU = np.argmax(pred_mask, axis =-1)
    #Mean IoU 
    m = tf.keras.metrics.MeanIoU(num_classes=3)
    m.update_state(sample_mask, pred_mask_IoU)
    print("IoU =",m.result().numpy())

    #prediction visualization
    colors = visualize.random_colors(len(img_meta_obj.annotations))
    color = colors[0]
    print(color)

    masked_image = sample_image.astype(np.uint32).copy()
    pred_mask = np.argmax(pred_mask, axis =-1)
    print(masked_image.shape)
    print(pred_mask.shape)
    masked_image = visualize.apply_mask(masked_image, pred_mask, color)
    print("predict image")
    visualize.display_images([masked_image])
    #GT
    colors = visualize.random_colors(len(img_meta_obj.annotations))
    color = colors[0]
    print(color)


    masked_image = sample_image.astype(np.uint32).copy()

    print(masked_image.shape)
    print(sample_mask.shape)

    masked_image = visualize.apply_mask(masked_image, sample_mask, color)
    print("Ground Truth Image")
    visualize.display_images([masked_image])
    sample_idx=sample_idx+1
IoU = 0.50570077
(1.0, 0.0, 0.0)
(256, 256, 3)
(256, 256)
predict image
(0.7027027027027026, 1.0, 0.0)
(256, 256, 3)
(256, 256)
Ground Truth Image
IoU = 0.5000879
(0.7027027027027026, 0.0, 1.0)
(256, 256, 3)
(256, 256)
predict image
(0.3783783783783783, 1.0, 0.0)
(256, 256, 3)
(256, 256)
Ground Truth Image
IoU = 0.5037121
(0.0, 0.10810810810810789, 1.0)
(256, 256, 3)
(256, 256)
predict image
(0.8648648648648649, 1.0, 0.0)
(256, 256, 3)
(256, 256)
Ground Truth Image
IoU = 0.4924397
(0.8648648648648649, 1.0, 0.0)
(256, 256, 3)
(256, 256)
predict image
(1.0, 0.32432432432432434, 0.0)
(256, 256, 3)
(256, 256)
Ground Truth Image
IoU = 0.4889361
(0.0, 1.0, 0.43243243243243246)
(256, 256, 3)
(256, 256)
predict image
(0.21621621621621623, 1.0, 0.0)
(256, 256, 3)
(256, 256)
Ground Truth Image
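
The loop above reports IoU per sampled image (all near 0.5). For a single score over the whole validation set, one MeanIoU metric can accumulate across every batch instead of being recreated per image. A sketch, assuming dataset['val'] is finite (not .repeat()ed) and yields (image, mask) batches as above:

m = tf.keras.metrics.MeanIoU(num_classes=3)
for image, mask in dataset['val']:
    # Collapse per-pixel class scores to labels, then accumulate the confusion matrix
    pred = np.argmax(model.predict(image), axis=-1)
    m.update_state(np.squeeze(mask.numpy(), axis=-1), pred)
print("validation mean IoU =", m.result().numpy())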